Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/logs/storage-8-0.log
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ '[' -n '' ']'
+ main
+ create_infra storage-5599
+ local ns=storage-5599
+ '[' -n pxc-operator ']'
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get pxc --all-namespaces -o wide
+ kubectl patch pxc -n storage-11772 hostpath --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/hostpath patched (no change)
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.SzfO5gOADk
++ mktemp
+ local LAST_ERR=/tmp/tmp.ggWG7FtRbx
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.SzfO5gOADk
perconaxtradbcluster.pxc.percona.com "hostpath" deleted from storage-11772 namespace
+ cat /tmp/tmp.ggWG7FtRbx
+ rm /tmp/tmp.SzfO5gOADk /tmp/tmp.ggWG7FtRbx
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.LhBY3GbCOT
++ mktemp
+ local LAST_ERR=/tmp/tmp.qDKpGetcDK
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.LhBY3GbCOT
No resources found
+ cat /tmp/tmp.qDKpGetcDK
+ rm /tmp/tmp.LhBY3GbCOT /tmp/tmp.qDKpGetcDK
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.fSXhLEA8zQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.65aMKVWQXz
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.fSXhLEA8zQ
No resources found
+ cat /tmp/tmp.65aMKVWQXz
+ rm /tmp/tmp.fSXhLEA8zQ /tmp/tmp.65aMKVWQXz
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ tail -n1
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pxc-operator'
+ set +o xtrace
++ mktemp
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pxc-operator
++ mktemp
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ local LAST_OUT=/tmp/tmp.IsPRX6c5QP
+ local LAST_OUT=/tmp/tmp.v1gJyXslTi
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.LN4fhUtfmX
+ local exit_status=0
+ local LAST_ERR=/tmp/tmp.jNvt2gZa4Y
+ local exit_status=0
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pxc-operator
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ awk '{print$1}'
+ xargs kubectl delete ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.v1gJyXslTi
+ cat /tmp/tmp.jNvt2gZa4Y
+ rm /tmp/tmp.v1gJyXslTi /tmp/tmp.jNvt2gZa4Y
+ return 0
namespace "storage-11772" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.IsPRX6c5QP
namespace "pxc-operator" deleted
+ cat /tmp/tmp.LN4fhUtfmX
+ rm /tmp/tmp.IsPRX6c5QP /tmp/tmp.LN4fhUtfmX
+ return 0
+ wait_for_delete namespace/pxc-operator
+ local res=namespace/pxc-operator
+ echo -n 'waiting for namespace/pxc-operator to be deleted'
waiting for namespace/pxc-operator to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "pxc-operator" not found
+ desc 'create namespace pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.M0yjma0iMZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.qvdCG6kuCp
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.M0yjma0iMZ
namespace/pxc-operator created
+ cat /tmp/tmp.qvdCG6kuCp
+ rm /tmp/tmp.M0yjma0iMZ /tmp/tmp.qvdCG6kuCp
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.UAvcnVSxxu
+++ mktemp
++ local LAST_ERR=/tmp/tmp.AFZ1bItIkM
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.UAvcnVSxxu
++ cat /tmp/tmp.AFZ1bItIkM
++ rm /tmp/tmp.UAvcnVSxxu /tmp/tmp.AFZ1bItIkM
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1 --namespace=pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.QgozoKc7d4
++ mktemp
+ local LAST_ERR=/tmp/tmp.CD5ggsPLAy
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1 --namespace=pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.QgozoKc7d4
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1" modified.
+ cat /tmp/tmp.CD5ggsPLAy
+ rm /tmp/tmp.QgozoKc7d4 /tmp/tmp.CD5ggsPLAy
+ return 0
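Every kubectl_bin call above follows the same retry pattern: stdout and stderr are captured into mktemp files, the command is attempted up to three times, and the captured streams are replayed before the temp files are removed. A minimal sketch of that wrapper, reconstructed from the trace (the real helper lives in the e2e-tests suite and may differ in detail):

# Sketch of the retry wrapper seen throughout this log; an approximation,
# not the exact helper from the test suite.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do              # up to three attempts, as in the trace
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep "$i"                   # the trace shows 'sleep 0' between retries
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}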
+ deploy_operator
+ desc 'start PXC operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6oxW5lGPbL
++ mktemp
+ local LAST_ERR=/tmp/tmp.ou93iS2TwM
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6oxW5lGPbL
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
+ cat /tmp/tmp.ou93iS2TwM
+ rm /tmp/tmp.6oxW5lGPbL /tmp/tmp.ou93iS2TwM
+ return 0
+ '[' -n pxc-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=pxc-operator
+ local rbac=cw-rbac
+ kubectl_bin apply -f -
+ sed -e 's^namespace: .*^namespace: pxc-operator^'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/deploy/cw-rbac.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.3meBEbPV6C
++ mktemp
+ local LAST_ERR=/tmp/tmp.or5UpKhAwJ
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.3meBEbPV6C
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
+ cat /tmp/tmp.or5UpKhAwJ
+ rm /tmp/tmp.3meBEbPV6C /tmp/tmp.or5UpKhAwJ
+ return 0
+ sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2^'
+ sed -e 's^failureThreshold: .*^failureThreshold: 10^'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/deploy/cw-operator.yaml
+ kubectl_bin apply -f -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.yxgM8Md61v
++ mktemp
+ local LAST_ERR=/tmp/tmp.gXntZWU5im
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.yxgM8Md61v
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
+ cat /tmp/tmp.gXntZWU5im
+ rm /tmp/tmp.yxgM8Md61v /tmp/tmp.gXntZWU5im
+ return 0
+ sleep 10
+ kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
++ mktemp
+ local LAST_OUT=/tmp/tmp.WmCKLFMrt6
++ mktemp
+ local LAST_ERR=/tmp/tmp.LDtG17aqEI
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.WmCKLFMrt6
pod/percona-xtradb-cluster-operator-f9b8d74f9-qwn5l condition met
+ cat /tmp/tmp.LDtG17aqEI
+ rm /tmp/tmp.WmCKLFMrt6 /tmp/tmp.LDtG17aqEI
+ return 0
++ get_operator_pod
++ local label_prefix=app.kubernetes.io/
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
+++ grep -c percona-xtradb-cluster-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Ti2LWpnfsy
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bKybfzmChJ
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.Ti2LWpnfsy
++ cat /tmp/tmp.bKybfzmChJ
++ rm /tmp/tmp.Ti2LWpnfsy /tmp/tmp.bKybfzmChJ
++ return 0
+ wait_pod percona-xtradb-cluster-operator-f9b8d74f9-qwn5l 480 pxc-operator
+ local pod=percona-xtradb-cluster-operator-f9b8d74f9-qwn5l
+ local max_retry=480
+ local ns=pxc-operator
++ echo percona-xtradb-cluster-operator-f9b8d74f9-qwn5l
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/percona-xtradb-cluster-operator-f9b8d74f9-qwn5l condition met
waiting for pod/percona-xtradb-cluster-operator-f9b8d74f9-qwn5l to become Ready.Ok
+ sleep 3
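Worth noting in the deploy_operator step above: the operator manifest is rewritten in-stream before it is applied. sed swaps in this PR's image and a looser failureThreshold, while yq (v4 syntax) forces DISABLE_TELEMETRY and LOG_LEVEL in the Deployment's env. Condensed from the trace, with this run's paths and tag:

# Condensed from the trace above.
cat deploy/cw-operator.yaml \
    | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2^' \
    | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
    | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
        | select(.name == "percona-xtradb-cluster-operator").env[]
        | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
    | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
        | select(.name == "percona-xtradb-cluster-operator").env[]
        | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
    | kubectl apply -f -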
+ create_namespace storage-5599
+ local namespace=storage-5599
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
++ tail -n1
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl api-resources
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces storage-5599'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces storage-5599
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace storage-5599
++ mktemp
+ local LAST_OUT=/tmp/tmp.aoThhCMhnZ
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.5arzU5mvqR
+ local exit_status=0
+ local LAST_OUT=/tmp/tmp.iB2au6CqyZ
++ seq 0 2
+ awk '{print$1}'
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace storage-5599
++ mktemp
+ local LAST_ERR=/tmp/tmp.zG728pfsH3
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ xargs kubectl delete ns
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace storage-5599
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.iB2au6CqyZ
+ cat /tmp/tmp.zG728pfsH3
+ rm /tmp/tmp.iB2au6CqyZ /tmp/tmp.zG728pfsH3
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.aoThhCMhnZ
+ cat /tmp/tmp.5arzU5mvqR
Error from server (NotFound): namespaces "storage-5599" not found
+ rm /tmp/tmp.aoThhCMhnZ /tmp/tmp.5arzU5mvqR
+ return 1
+ :
+ wait_for_delete namespace/storage-5599
+ local res=namespace/storage-5599
+ echo -n 'waiting for namespace/storage-5599 to be deleted'
waiting for namespace/storage-5599 to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "storage-5599" not found
+ desc 'create namespace storage-5599'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace storage-5599
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace storage-5599
++ mktemp
+ local LAST_OUT=/tmp/tmp.XYHiMUtWqF
++ mktemp
+ local LAST_ERR=/tmp/tmp.lmg4DOLlCK
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace storage-5599
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.XYHiMUtWqF
namespace/storage-5599 created
+ cat /tmp/tmp.lmg4DOLlCK
+ rm /tmp/tmp.XYHiMUtWqF /tmp/tmp.lmg4DOLlCK
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.YWbUhgCo3N
+++ mktemp
++ local LAST_ERR=/tmp/tmp.J1G9b7VPfM
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.YWbUhgCo3N
++ cat /tmp/tmp.J1G9b7VPfM
++ rm /tmp/tmp.YWbUhgCo3N /tmp/tmp.J1G9b7VPfM
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1 --namespace=storage-5599
++ mktemp
+ local LAST_OUT=/tmp/tmp.cTeJBAQave
++ mktemp
+ local LAST_ERR=/tmp/tmp.3wNgJCtIVR
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1 --namespace=storage-5599
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.cTeJBAQave
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2274-91588dd2-9-cluster1" modified.
+ cat /tmp/tmp.3wNgJCtIVR
+ rm /tmp/tmp.cTeJBAQave /tmp/tmp.3wNgJCtIVR
+ return 0
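The repeated "error: resource(s) were provided, but no name was specified" messages in destroy_chaos_mesh are expected noise, not failures: the greps find no chaos-mesh objects on this cluster, kubectl delete receives a resource type but no names, and the script swallows the error with the no-op colon builtin. The pattern, in short:

# Delete whatever matches; tolerate the case where nothing is found.
timeout 30 kubectl delete MutatingWebhookConfiguration \
    $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :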
+ apply_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.HbDxGANnft
++ mktemp
+ local LAST_ERR=/tmp/tmp.1jhjEYCy9a
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.HbDxGANnft
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.1jhjEYCy9a
+ rm /tmp/tmp.HbDxGANnft /tmp/tmp.1jhjEYCy9a
+ return 0
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ kubectl_bin apply -f -
+ local pvc_name=
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ local LAST_OUT=/tmp/tmp.DkAz7N17BA
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
++ mktemp
+ local LAST_ERR=/tmp/tmp.8VYSIy9rfd
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.DkAz7N17BA
deployment.apps/pxc-client created
+ cat /tmp/tmp.8VYSIy9rfd
+ rm /tmp/tmp.DkAz7N17BA /tmp/tmp.8VYSIy9rfd
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath-helper.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Aexyl5QJsg
++ mktemp
+ local LAST_ERR=/tmp/tmp.aqzR1pT8q6
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath-helper.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Aexyl5QJsg
secret/my-cluster-secrets created
secret/some-name-ssl created
secret/some-name-ssl-internal created
daemonset.apps/hostpath-helper created
+ cat /tmp/tmp.aqzR1pT8q6
+ rm /tmp/tmp.Aexyl5QJsg /tmp/tmp.aqzR1pT8q6
+ return 0
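The apply_config/cat_config helper pipes every manifest through a chain of sed substitutions so that one set of YAML files works for any build: component images are pinned to this PR's operator image and the main-pxc8.0 images, and the minio-service namespace placeholder is filled in. A condensed sketch under those assumptions (the trace above shows the full list of substitutions):

# Condensed: the real pipeline applies several more substitutions.
cat_config() {
    cat "$1" \
        | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#' \
        | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~
}
cat_config e2e-tests/conf/client.yml | kubectl apply -f -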
+ desc 'check emptydir'
+ set +o xtrace
-----------------------------------------------------------------------------------
check emptydir
-----------------------------------------------------------------------------------
+ check_cr_config emptydir
+ local cluster=emptydir
+ desc 'apply cr config'
+ set +o xtrace
-----------------------------------------------------------------------------------
apply cr config
-----------------------------------------------------------------------------------
+ spinup_pxc emptydir /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ local cluster=emptydir
+ local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ local size=3
+ local sleep=10
+ local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml
+ local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local port=3306
+ desc 'create first PXC cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PXC cluster
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.kfzLsHCdCm
++ mktemp
+ local LAST_ERR=/tmp/tmp.cYr7Si2v0t
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.kfzLsHCdCm
secret/my-cluster-secrets unchanged
secret/some-name-ssl unchanged
secret/some-name-ssl-internal unchanged
+ cat /tmp/tmp.cYr7Si2v0t
+ rm /tmp/tmp.kfzLsHCdCm /tmp/tmp.cYr7Si2v0t
+ return 0
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml ''
+ kubectl_bin apply -f -
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
+ local pvc_name=
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml
++ mktemp
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#'
+ local LAST_OUT=/tmp/tmp.kGx5ZwRVIr
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~
++ mktemp
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ local LAST_ERR=/tmp/tmp.Usx92pLeLx
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.kGx5ZwRVIr
deployment.apps/pxc-client unchanged
+ cat /tmp/tmp.Usx92pLeLx
+ rm /tmp/tmp.kGx5ZwRVIr /tmp/tmp.Usx92pLeLx
+ return 0
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ local pvc_name=
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ local LAST_OUT=/tmp/tmp.fISkW5DWh0
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ local LAST_ERR=/tmp/tmp.WlubLZSm3t
+ local exit_status=0
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
++ seq 0 2
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.fISkW5DWh0
perconaxtradbcluster.pxc.percona.com/emptydir created
+ cat /tmp/tmp.WlubLZSm3t
+ rm /tmp/tmp.fISkW5DWh0 /tmp/tmp.WlubLZSm3t
+ return 0
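The emptydir.yml under test is not reproduced in the log; presumably it points the PXC data volume at an emptyDir. An illustrative fragment under that assumption (field names follow the operator's volumeSpec schema; this is not the actual test file):

# Illustrative only: roughly what storage/conf/emptydir.yml presumably contains.
kubectl apply -f - <<'EOF'
apiVersion: pxc.percona.com/v1
kind: PerconaXtraDBCluster
metadata:
  name: emptydir
spec:
  pxc:
    size: 3
    volumeSpec:
      emptyDir: {}    # node-local scratch; data does not survive pod deletion
EOF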
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
++ get_proxy emptydir
++ local target_cluster=emptydir
+++ kubectl_bin get pxc emptydir -o 'jsonpath={.spec.haproxy.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.V2jv9Yytd3
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.FgWAJc4rGn
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc emptydir -o 'jsonpath={.spec.haproxy.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.V2jv9Yytd3
+++ cat /tmp/tmp.FgWAJc4rGn
+++ rm /tmp/tmp.V2jv9Yytd3 /tmp/tmp.FgWAJc4rGn
+++ return 0
++ [[ '' == \t\r\u\e ]]
+++ kubectl_bin get pxc emptydir -o 'jsonpath={.spec.proxysql.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.MgNRg96M0Y
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.fU0HlCiyuV
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc emptydir -o 'jsonpath={.spec.proxysql.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.MgNRg96M0Y
+++ cat /tmp/tmp.fU0HlCiyuV
+++ rm /tmp/tmp.MgNRg96M0Y /tmp/tmp.fU0HlCiyuV
+++ return 0
++ [[ true == \t\r\u\e ]]
++ echo emptydir-proxysql
++ return
+ local proxy=emptydir-proxysql
+ kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
++ mktemp
+ local LAST_OUT=/tmp/tmp.R3N22tB6ok
++ mktemp
+ local LAST_ERR=/tmp/tmp.7wStXrfOX5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.R3N22tB6ok
+ cat /tmp/tmp.7wStXrfOX5
error: no matching resources found
+ rm /tmp/tmp.R3N22tB6ok /tmp/tmp.7wStXrfOX5
+ return 1
+ true
+ wait_for_running emptydir-proxysql 1
+ local name=emptydir-proxysql
+ let last_pod=0
+ :
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ wait_pod emptydir-proxysql-0 480
+ local pod=emptydir-proxysql-0
+ local max_retry=480
+ local ns=
++ grep -E '^(pxc|proxysql)$'
++ echo emptydir-proxysql-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+ local container=proxysql
+ set +o xtrace
pod/emptydir-proxysql-0 condition met
waiting for pod/emptydir-proxysql-0 to become Ready.Ok
+ wait_for_running emptydir-pxc 3
+ local name=emptydir-pxc
+ let last_pod=2
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ wait_pod emptydir-pxc-0 480
+ local pod=emptydir-pxc-0
+ local max_retry=480
+ local ns=
++ echo emptydir-pxc-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/emptydir-pxc-0 condition met
waiting for pod/emptydir-pxc-0 to become Ready.Ok
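The three failed kubectl wait attempts on the monitoring label are likewise expected: this test deploys no PMM pods, so after the retry wrapper exhausts its attempts the script continues via the bare "+ true". The effective pattern:

# Monitoring pods are optional here; do not fail the test if none exist.
kubectl wait --for=condition=Ready pod \
    -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator \
    --timeout=300s -n storage-5599 || true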
+ for i in '$(seq 0 $last_pod)'
+ wait_pod emptydir-pxc-1 480
+ local pod=emptydir-pxc-1
+ local max_retry=480
+ local ns=
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
++ echo emptydir-pxc-1
+ local container=pxc
+ set +o xtrace
pod/emptydir-pxc-1 condition met
waiting for pod/emptydir-pxc-1 to become Ready.Ok
+ for i in '$(seq 0 $last_pod)'
+ wait_pod emptydir-pxc-2 480
+ local pod=emptydir-pxc-2
+ local max_retry=480
+ local ns=
++ echo emptydir-pxc-2
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/emptydir-pxc-2 condition met
waiting for pod/emptydir-pxc-2 to become Ready.Ok
+ sleep 10
++ kubectl get pxc emptydir -o 'jsonpath={.spec.secretsName}'
+ local secret_name=my-cluster-secrets
++ getSecretData my-cluster-secrets root
++ local secretName=my-cluster-secrets
++ local dataKey=root
++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}'
++ base64 --decode
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xXTeQqrh7b
+++ mktemp
++ local LAST_ERR=/tmp/tmp.VY9WqdepPd
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.xXTeQqrh7b
++ cat /tmp/tmp.VY9WqdepPd
++ rm /tmp/tmp.xXTeQqrh7b /tmp/tmp.VY9WqdepPd
++ return 0
+ local root_pass=root_password
+ desc 'write data'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data
-----------------------------------------------------------------------------------
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306'
+ local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;'
+ local 'uri=-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.oqgvxsgjGg
+++ mktemp
++ local LAST_ERR=/tmp/tmp.gvXKd1m42V
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.oqgvxsgjGg
++ cat /tmp/tmp.gvXKd1m42V
++ rm /tmp/tmp.oqgvxsgjGg /tmp/tmp.gvXKd1m42V
++ return 0
+ client_pod=pxc-client-59944c5bbf-r7p99
+ wait_pod pxc-client-59944c5bbf-r7p99
+ local pod=pxc-client-59944c5bbf-r7p99
+ local max_retry=480
+ local ns=
++ echo pxc-client-59944c5bbf-r7p99
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-59944c5bbf-r7p99 condition met
waiting for pod/pxc-client-59944c5bbf-r7p99 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup
.Ok
+ set +o xtrace
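getSecretData pulls a single key out of a Secret with a go-template and base64-decodes it; here it recovers the root password (root_password) that the SQL statements below authenticate with:

# Read one key from a Secret and decode it, as getSecretData does above.
root_pass=$(kubectl get secrets/my-cluster-secrets --template='{{.data.root}}' | base64 --decode)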
+ run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306'
+ local 'command=INSERT myApp.myApp (id) VALUES (100500)'
+ local 'uri=-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.nXGxVmcBNC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.yRiwnONfXC
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.nXGxVmcBNC
++ cat /tmp/tmp.yRiwnONfXC
++ rm /tmp/tmp.nXGxVmcBNC /tmp/tmp.yRiwnONfXC
++ return 0
+ client_pod=pxc-client-59944c5bbf-r7p99
+ wait_pod pxc-client-59944c5bbf-r7p99
+ local pod=pxc-client-59944c5bbf-r7p99
+ local max_retry=480
+ local ns=
++ echo pxc-client-59944c5bbf-r7p99
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-59944c5bbf-r7p99 condition met
waiting for pod/pxc-client-59944c5bbf-r7p99 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup
.Ok
+ set +o xtrace
+ sleep 30
++ seq 0 2
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1-80.sql ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.uL0g8f4y8w
+++ mktemp
++ local LAST_ERR=/tmp/tmp.6jwoV2IzLX
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.uL0g8f4y8w
++ cat /tmp/tmp.6jwoV2IzLX
++ rm /tmp/tmp.uL0g8f4y8w /tmp/tmp.6jwoV2IzLX
++ return 0
+ client_pod=pxc-client-59944c5bbf-r7p99
+ wait_pod pxc-client-59944c5bbf-r7p99
+ local pod=pxc-client-59944c5bbf-r7p99
+ local max_retry=480
+ local ns=
++ grep -E '^(pxc|proxysql)$'
++ echo pxc-client-59944c5bbf-r7p99
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+ local container=
+ set +o xtrace
pod/pxc-client-59944c5bbf-r7p99 condition met
waiting for pod/pxc-client-59944c5bbf-r7p99 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup
.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.SmSknRkDmp/select-1.sql ']'
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql /tmp/tmp.SmSknRkDmp/select-1.sql
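run_mysql locates the pxc-client Deployment's pod, waits for it, and execs the mysql client inside it with the given statement and connection string. A simplified reconstruction (the real helper also handles namespaces and retries):

# Simplified reconstruction of run_mysql from the trace.
run_mysql() {
    local command=$1 uri=$2 client_pod
    client_pod=$(kubectl get pods --selector=name=pxc-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client_pod" -- bash -c "echo \"$command\" | mysql -sN $uri"
}
run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' \
    "-h emptydir-proxysql -uroot -p'root_password' -P3306"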
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1-80.sql ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.4343c7FSIT
+++ mktemp
++ local LAST_ERR=/tmp/tmp.JfRJ7UkpxH
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.4343c7FSIT
++ cat /tmp/tmp.JfRJ7UkpxH
++ rm /tmp/tmp.4343c7FSIT /tmp/tmp.JfRJ7UkpxH
++ return 0
+ client_pod=pxc-client-59944c5bbf-r7p99
+ wait_pod pxc-client-59944c5bbf-r7p99
+ local pod=pxc-client-59944c5bbf-r7p99
+ local max_retry=480
+ local ns=
++ grep -E '^(pxc|proxysql)$'
++ echo pxc-client-59944c5bbf-r7p99
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+ local container=
+ set +o xtrace
pod/pxc-client-59944c5bbf-r7p99 condition met
waiting for pod/pxc-client-59944c5bbf-r7p99 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup
.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.SmSknRkDmp/select-1.sql ']'
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql /tmp/tmp.SmSknRkDmp/select-1.sql
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1-80.sql ]]
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.AJsRMKkFv0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ysLTsnyo2g
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.AJsRMKkFv0
++ cat /tmp/tmp.ysLTsnyo2g
++ rm /tmp/tmp.AJsRMKkFv0 /tmp/tmp.ysLTsnyo2g
++ return 0
+ client_pod=pxc-client-59944c5bbf-r7p99
+ wait_pod pxc-client-59944c5bbf-r7p99
+ local pod=pxc-client-59944c5bbf-r7p99
+ local max_retry=480
+ local ns=
++ echo pxc-client-59944c5bbf-r7p99
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-59944c5bbf-r7p99 condition met
waiting for pod/pxc-client-59944c5bbf-r7p99 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup
.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.SmSknRkDmp/select-1.sql ']'
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/select-1.sql /tmp/tmp.SmSknRkDmp/select-1.sql
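Each of the three per-pod checks above is the same compare_mysql_cmd pattern: run the query against one pxc member, save the output, and diff it against a canned expected file, where an empty diff means pass. Roughly:

# Rough shape of compare_mysql_cmd; the real helper also picks
# version-specific expected files (e.g. select-1-80.sql) when present.
compare_mysql_cmd() {
    local command_id=$1 command=$2 uri=$3
    local expected=e2e-tests/storage/compare/${command_id}.sql
    run_mysql "$command" "$uri" > "/tmp/${command_id}.sql"
    diff -u "$expected" "/tmp/${command_id}.sql"
}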
++ is_keyring_plugin_in_use emptydir
++ local cluster=emptydir
++ grep -E -o 'early-plugin-load=keyring_\w+.so'
++ kubectl_bin exec -it emptydir-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.GUIWMz44GC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Re0fXCvNDh
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec -it emptydir-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.GUIWMz44GC
++ cat /tmp/tmp.Re0fXCvNDh
Unable to use a TTY - input is not a terminal or the right kind of file
++ rm /tmp/tmp.GUIWMz44GC /tmp/tmp.Re0fXCvNDh
++ return 0
+ '[' '' ']'
+ desc 'check if statefulset created with expected config'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if statefulset created with expected config
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/emptydir-pxc
+ local resource=statefulset/emptydir-pxc
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc.yml
+ local new_result=/tmp/tmp.SmSknRkDmp/statefulset_emptydir-pxc.yml
+ desc 'compare statefulset/emptydir-pxc-'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare statefulset/emptydir-pxc-
-----------------------------------------------------------------------------------
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-eks.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-80.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ version_gt 1.33
++ echo '1.31 >= 1.33'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.29
++ echo '1.31 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k129.yml ']'
+ version_gt 1.27
++ echo '1.31 >= 1.27'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml ']'
+ expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-oc.yml ']'
+ version_gt 1.29
++ echo '1.31 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-k129-oc.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-eks.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-aks.yml ']'
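compare_kubectl chooses the expected file by probing the server's Kubernetes minor version with bc: for this 1.31 cluster, version_gt 1.33 fails while version_gt 1.29 and 1.27 succeed, and the -k127 variant is the one that exists on disk. The check itself is roughly:

# Succeed when the server version (1.31 here) is >= the argument.
# KUBE_VERSION is an assumed variable name; the suite derives it elsewhere.
version_gt() {
    [ "$(echo "$KUBE_VERSION >= $1" | bc -l)" -eq 1 ]
}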
+ kubectl_bin get -o yaml statefulset/emptydir-pxc
+ yq eval ' del(.metadata.managedFields)
| del(.. | select(has("creationTimestamp")).creationTimestamp)
| del(.. | select(has("namespace")).namespace)
| del(.. | select(has("uid")).uid)
| del(.metadata.resourceVersion)
| del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH"))
| del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH"))
| del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH"))
| del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL"))
| del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME"))
| del(.metadata.selfLink)
| del(.metadata.deletionTimestamp)
| del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration")
| del(.metadata.annotations."kubernetes.io/psp")
| del(.metadata.annotations."batch.kubernetes.io/job-tracking")
| del(.metadata.labels."batch.kubernetes.io/job-name")
| del(.metadata.labels."job-name")
| del(.metadata.annotations."cloud.google.com/neg")
| del(.metadata.annotations."k8s.v1.cni.cncf.io*")
| del(.metadata.annotations."k8s.ovn.org/pod-networks")
| del(.spec.template.metadata.annotations."last-applied-secret")
| del(.spec.template.metadata.labels."batch.kubernetes.io/job-name")
| del(.spec.template.metadata.labels."job-name")
| del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid")
| del(.. | select(has("image")).image)
| del(.. | select(has("clusterIP")).clusterIP)
| del(.. | select(has("clusterIPs")).clusterIPs)
| del(.. | select(has("dataSource")).dataSource)
| del(.. | select(has("procMount")).procMount)
| del(.. | select(has("storageClassName")).storageClassName)
| del(.. | select(has("finalizers")).finalizers)
| del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection")
| del(.. | select(has("volumeName")).volumeName)
| del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner")
| del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner")
| del(.spec.volumeMode)
| del(.spec.nodeName)
| del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node")
| del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash")
| del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash")
| del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash")
| del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash")
| del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash")
| del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container")
| del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem"))
| del(.. | select(has("healthCheckNodePort")).healthCheckNodePort)
| del(.. | select(has("nodePort")).nodePort)
| del(.. | select(has("imagePullSecrets")).imagePullSecrets)
| del(.. | select(has("enableServiceLinks")).enableServiceLinks)
| del(.status)
| del(.spec.volumeClaimTemplates[].apiVersion)
| del(.spec.volumeClaimTemplates[].kind)
| del(.metadata.ownerReferences[].apiVersion)
| del(.. | select(has("controller-uid")).controller-uid)
| del(.. | select(has("preemptionPolicy")).preemptionPolicy)
| del(.spec.ipFamilies)
| del(.spec.ipFamilyPolicy)
| (.. | select(. == "policy/v1beta1")) = "policy/v1"
| del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname")
| (.. | select(tag == "!!str")) |= sub("storage-5599", "namespace")
| (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access")
| del(.. | select(has("annotations")).annotations | select(length==0))
| del(.spec.crVersion)
| del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.MhB9zP7FQj
++ mktemp
+ local LAST_ERR=/tmp/tmp.hdR3xtlkkc
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/emptydir-pxc
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.MhB9zP7FQj
+ cat /tmp/tmp.hdR3xtlkkc
+ rm /tmp/tmp.MhB9zP7FQj /tmp/tmp.hdR3xtlkkc
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml /tmp/tmp.SmSknRkDmp/statefulset_emptydir-pxc.yml
+ log 'compare_kubectl: statefulset/emptydir-pxc OK'
++ date +%Y-%m-%dT%H:%M:%S%z
+ echo '[2025-11-30T11:01:24+0000]' compare_kubectl: statefulset/emptydir-pxc OK
[2025-11-30T11:01:24+0000] compare_kubectl: statefulset/emptydir-pxc OK
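The long yq filter above is compare_kubectl's normalization step: it strips everything cluster- or run-specific (managedFields, uid, resourceVersion, status, generated hashes, provisioner annotations) and rewrites the random test namespace back to the literal string "namespace" so the live object can be diffed against a static expected file. A trimmed illustration of the idea:

# Trimmed illustration; the full filter is in the trace above.
kubectl get -o yaml statefulset/emptydir-pxc | yq eval '
    del(.metadata.managedFields) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.status) |
    (.. | select(tag == "!!str")) |= sub("storage-5599", "namespace")' - \
    > /tmp/statefulset_emptydir-pxc.yml
diff -u e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml \
    /tmp/statefulset_emptydir-pxc.yml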
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("storage-5599", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.bHPDTuFQ0Y ++ mktemp + local LAST_ERR=/tmp/tmp.ZSm6PDvoaO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/emptydir-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bHPDTuFQ0Y + cat /tmp/tmp.ZSm6PDvoaO + rm /tmp/tmp.bHPDTuFQ0Y /tmp/tmp.ZSm6PDvoaO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127.yml /tmp/tmp.SmSknRkDmp/statefulset_emptydir-proxysql.yml + log 'compare_kubectl: statefulset/emptydir-proxysql OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-30T11:01:25+0000]' compare_kubectl: statefulset/emptydir-proxysql OK [2025-11-30T11:01:25+0000] compare_kubectl: statefulset/emptydir-proxysql OK + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml ++ mktemp + local LAST_OUT=/tmp/tmp.8XWtEuifc7 ++ mktemp + local LAST_ERR=/tmp/tmp.8gFBKvVt2g + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/emptydir.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8XWtEuifc7 perconaxtradbcluster.pxc.percona.com "emptydir" deleted from storage-5599 namespace + cat /tmp/tmp.8gFBKvVt2g + rm /tmp/tmp.8XWtEuifc7 /tmp/tmp.8gFBKvVt2g + return 0 + desc 'check hostpath' + set +o xtrace ----------------------------------------------------------------------------------- check hostpath ----------------------------------------------------------------------------------- + check_cr_config hostpath + local cluster=hostpath + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + spinup_pxc hostpath /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml + local cluster=hostpath + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.LbdfMKn2iH ++ mktemp + local LAST_ERR=/tmp/tmp.mkHHPgwJpq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LbdfMKn2iH 
secret/my-cluster-secrets unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.mkHHPgwJpq + rm /tmp/tmp.LbdfMKn2iH /tmp/tmp.mkHHPgwJpq + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~ + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.I8C5OGSjNd ++ mktemp + local LAST_ERR=/tmp/tmp.3q9jAFqTa1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.I8C5OGSjNd deployment.apps/pxc-client unchanged + cat /tmp/tmp.3q9jAFqTa1 + rm /tmp/tmp.I8C5OGSjNd /tmp/tmp.3q9jAFqTa1 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.3legmk0X1L + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + 
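-----------------------------------------------------------------------------------
note: how apply_config/cat_config pin images
-----------------------------------------------------------------------------------
cat_config never templates the YAML; it pipes the stock config through a chain of
sed expressions that pin every image field (and the apiVersion) to the build under
test before handing the stream to kubectl apply -f -. The chain just traced rewrote
client.yml; the same pipeline runs next for hostpath.yml. A condensed sketch under
the assumption that IMAGE and IMAGE_PXC hold the operator and PXC images (the real
script derives them from the PR job):

cat_config_sketch() {
    local input_file=$1
    # rewrite apiVersion and image references, leave everything else untouched
    cat "${input_file}" |
        sed -e "s#apiVersion: pxc.percona.com/v.*\$#apiVersion: pxc.percona.com/v1#" |
        sed -e "s#image:.*/percona-xtradb-cluster:.*\$#image: ${IMAGE_PXC}#" |
        sed -e "s#image:.*-init\$#image: ${IMAGE}#"
}

cat_config_sketch e2e-tests/storage/conf/hostpath.yml | kubectl apply -f -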
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml
+ local pvc_name=
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2274/e2e-tests/storage/conf/hostpath.yml
++ mktemp
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
+ local LAST_OUT=/tmp/tmp.3legmk0X1L
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-5599~
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ local LAST_ERR=/tmp/tmp.uVj0eUPqyN
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.3legmk0X1L
perconaxtradbcluster.pxc.percona.com/hostpath created
+ cat /tmp/tmp.uVj0eUPqyN
+ rm /tmp/tmp.3legmk0X1L /tmp/tmp.uVj0eUPqyN
+ return 0
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
++ get_proxy hostpath
++ local target_cluster=hostpath
+++ kubectl_bin get pxc hostpath -o 'jsonpath={.spec.haproxy.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.THbL8Nhfjn
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.l2mZ3kk15k
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc hostpath -o 'jsonpath={.spec.haproxy.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.THbL8Nhfjn
+++ cat /tmp/tmp.l2mZ3kk15k
+++ rm /tmp/tmp.THbL8Nhfjn /tmp/tmp.l2mZ3kk15k
+++ return 0
++ [[ '' == \t\r\u\e ]]
+++ kubectl_bin get pxc hostpath -o 'jsonpath={.spec.proxysql.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.2ydpwi6Zoa
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.Kq4KTJ3wkk
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc hostpath -o 'jsonpath={.spec.proxysql.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.2ydpwi6Zoa
+++ cat /tmp/tmp.Kq4KTJ3wkk
+++ rm /tmp/tmp.2ydpwi6Zoa /tmp/tmp.Kq4KTJ3wkk
+++ return 0
++ [[ true == \t\r\u\e ]]
++ echo hostpath-proxysql
++ return
+ local proxy=hostpath-proxysql
+ kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
++ mktemp
+ local LAST_OUT=/tmp/tmp.QuVefHXZu4
++ mktemp
+ local LAST_ERR=/tmp/tmp.GrKib576k5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-5599
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.QuVefHXZu4
+ cat /tmp/tmp.GrKib576k5
error: no matching resources found
+ rm /tmp/tmp.QuVefHXZu4 /tmp/tmp.GrKib576k5
+ return 1
+ true
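-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper
-----------------------------------------------------------------------------------
Every kubectl_bin trace in this log is the same wrapper: capture stdout/stderr into
mktemp files, try the command up to three times (seq 0 2), break on the first
success, and replay the captured output. Here all three kubectl wait attempts fail
with "no matching resources found" (this test deploys no pods matching the
monitoring selector), so the wrapper returns 1 and the caller tolerates it (the
"+ true" trace above, consistent with an || true in the script). A minimal sketch
of that wrapper, with the per-attempt back-off trimmed:

kubectl_bin_sketch() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        set -e
        if [ "${exit_status}" -eq 0 ]; then
            break
        fi
        sleep 0
    done
    # replay captured output so callers can parse it as if kubectl ran directly
    cat "${LAST_OUT}"
    cat "${LAST_ERR}" >&2
    rm -f "${LAST_OUT}" "${LAST_ERR}"
    return "${exit_status}"
}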
+ wait_for_running hostpath-proxysql 1
+ local name=hostpath-proxysql
+ let last_pod=0
+ :
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ wait_pod hostpath-proxysql-0 480
+ local pod=hostpath-proxysql-0
+ local max_retry=480
+ local ns=
++ echo hostpath-proxysql-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=proxysql
+ set +o xtrace
pod/hostpath-proxysql-0 condition met
waiting for pod/hostpath-proxysql-0 to become Ready.Ok
+ wait_for_running hostpath-pxc 3
+ local name=hostpath-pxc
+ let last_pod=2
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ wait_pod hostpath-pxc-0 480
+ local pod=hostpath-pxc-0
+ local max_retry=480
+ local ns=
++ echo hostpath-pxc-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
error: timed out waiting for the condition on pods/hostpath-pxc-0
waiting for pod/hostpath-pxc-0 to become Ready................................
Name:             hostpath-pxc-0
Namespace:        storage-5599
Priority:         0
Service Account:  default
Node:             gke-jen-pxc-2274-91588dd-default-pool-0fa07ced-lll7/10.208.0.34
Start Time:       Sun, 30 Nov 2025 11:01:34 +0000
Labels:           app.kubernetes.io/component=pxc
                  app.kubernetes.io/instance=hostpath
                  app.kubernetes.io/managed-by=percona-xtradb-cluster-operator
                  app.kubernetes.io/name=percona-xtradb-cluster
                  app.kubernetes.io/part-of=percona-xtradb-cluster
                  apps.kubernetes.io/pod-index=0
                  controller-revision-hash=hostpath-pxc-674d7c5f5d
                  statefulset.kubernetes.io/pod-name=hostpath-pxc-0
Annotations:      kubectl.kubernetes.io/default-container: pxc
                  percona.com/configuration-hash: d41d8cd98f00b204e9800998ecf8427e
                  percona.com/ssl-hash: cfd6a52398268173b51d7cdb1331c09a
Status:           Pending
IP:               10.86.201.35
IPs:
  IP:  10.86.201.35
Controlled By:  StatefulSet/hostpath-pxc
Init Containers:
  pxc-init:
    Container ID:  containerd://46e2d40916469044dcff881513739b8289e3281d448ef75c599e0b0ddbf5c983
    Image:         perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2
    Image ID:      docker.io/perconalab/percona-xtradb-cluster-operator@sha256:62726f886162d0fbd760af9c67d0e1d7d7f5a909823aa201930caf24d38d8952
    Port:          <none>
    Host Port:     <none>
    Command:
      /pxc-init-entrypoint.sh
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       OOMKilled
      Exit Code:    137
      Started:      Sun, 30 Nov 2025 11:35:08 +0000
      Finished:     Sun, 30 Nov 2025 11:35:20 +0000
    Ready:          False
    Restart Count:  11
    Limits:
      cpu:     50m
      memory:  50M
    Requests:
      cpu:     50m
      memory:  50M
    Environment:  <none>
    Mounts:
      /var/lib/mysql from datadir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4hnnv (ro)
Containers:
  pxc:
    Container ID:
    Image:       perconalab/percona-xtradb-cluster-operator:main-pxc8.0
    Image ID:
    Ports:       3306/TCP (mysql), 4444/TCP (sst), 4567/TCP (write-set), 4568/TCP (ist), 33062/TCP (mysql-admin), 33060/TCP (mysqlx)
    Host Ports:  0/TCP (mysql), 0/TCP (sst), 0/TCP (write-set), 0/TCP (ist), 0/TCP (mysql-admin), 0/TCP (mysqlx)
    Command:
      /var/lib/mysql/pxc-entrypoint.sh
    Args:
      mysqld
    State:          Waiting
      Reason:       PodInitializing
    Ready:          False
    Restart Count:  0
    Liveness:       exec [/var/lib/mysql/liveness-check.sh] delay=300s timeout=5s period=10s #success=1 #failure=3
    Readiness:      exec [/var/lib/mysql/readiness-check.sh] delay=15s timeout=15s period=30s #success=1 #failure=5
    Environment Variables from:
      hostpath-env-vars-pxc  Secret  Optional: true
    Environment:
      PXC_SERVICE:                    hostpath-pxc-unready
      MONITOR_HOST:                   %
      MYSQL_ROOT_PASSWORD:            Optional: false
      XTRABACKUP_PASSWORD:            Optional: false
      MONITOR_PASSWORD:               Optional: false
      CLUSTER_HASH:                   1500080
      OPERATOR_ADMIN_PASSWORD:        Optional: false
      LIVENESS_CHECK_TIMEOUT:         5
      READINESS_CHECK_TIMEOUT:        15
      DEFAULT_AUTHENTICATION_PLUGIN:  caching_sha2_password
      MYSQL_NOTIFY_SOCKET:            /var/lib/mysql/notify.sock
      MYSQL_STATE_FILE:               /var/lib/mysql/mysql.state
    Mounts:
      /etc/my.cnf.d from auto-config (rw)
      /etc/mysql/init-file from mysql-init-file (rw)
      /etc/mysql/mysql-users-secret from mysql-users-secret-file (rw)
      /etc/mysql/ssl from ssl (rw)
      /etc/mysql/ssl-internal from ssl-internal (rw)
      /etc/mysql/vault-keyring-secret from vault-keyring-secret (rw)
      /etc/percona-xtradb-cluster.conf.d from config (rw)
      /tmp from tmp (rw)
      /var/lib/mysql from datadir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4hnnv (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 False
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  datadir:
    Type:          HostPath (bare host directory volume)
    Path:          /run/data-dir
    HostPathType:  Directory
  tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      hostpath-pxc
    Optional:  true
  ssl-internal:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  hostpath-ssl-internal
    Optional:    true
  ssl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  some-name-ssl
    Optional:    false
  auto-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      auto-hostpath-pxc
    Optional:  true
  vault-keyring-secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  hostpath-vault
    Optional:    true
  mysql-users-secret-file:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  internal-hostpath
    Optional:    false
  mysql-init-file:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  hostpath-mysql-init
    Optional:    true
  kube-api-access-4hnnv:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    Optional:                false
    DownwardAPI:             true
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                  From               Message
  ----     ------     ----                 ----               -------
  Normal   Scheduled  36m                  default-scheduler  Successfully assigned storage-5599/hostpath-pxc-0 to gke-jen-pxc-2274-91588dd-default-pool-0fa07ced-lll7
  Normal   Pulled     36m                  kubelet            Successfully pulled image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2" in 139ms (139ms including waiting). Image size: 147369640 bytes.
  Normal   Pulled     36m                  kubelet            Successfully pulled image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2" in 169ms (169ms including waiting). Image size: 147369640 bytes.
  Normal   Pulled     35m                  kubelet            Successfully pulled image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2" in 115ms (115ms including waiting). Image size: 147369640 bytes.
  Normal   Started    35m (x4 over 36m)    kubelet            Started container pxc-init
  Normal   Pulled     35m                  kubelet            Successfully pulled image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2" in 127ms (127ms including waiting). Image size: 147369640 bytes.
  Normal   Pulling    34m (x5 over 36m)    kubelet            Pulling image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2"
  Normal   Created    34m (x5 over 36m)    kubelet            Created container: pxc-init
  Normal   Pulled     34m                  kubelet            Successfully pulled image "perconalab/percona-xtradb-cluster-operator:PR-2274-91588dd2" in 135ms (135ms including waiting). Image size: 147369640 bytes.
  Warning  BackOff    82s (x152 over 36m)  kubelet            Back-off restarting failed container pxc-init in pod hostpath-pxc-0_storage-5599(cb7d731d-ff63-4247-b924-ef536ee6d57c)
Error from server (BadRequest): container "pxc" in pod "hostpath-pxc-0" is waiting to start: PodInitializing
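-----------------------------------------------------------------------------------
note: why hostpath-pxc-0 never becomes Ready
-----------------------------------------------------------------------------------
The describe output narrows the failure down, and it is not the hostPath volume:
the pxc-init init container is OOMKilled (exit code 137) under its 50M memory
limit, has restarted 11 times into CrashLoopBackOff, and the pxc container
therefore never leaves PodInitializing, which is what wait_pod times out on after
480 retries. A triage sketch; the spec.initContainer override assumes a CR version
that exposes that field, and the 100M/200M values are illustrative, not taken from
this test's config:

# confirm the OOMKill and grab the previous attempt's logs
kubectl -n storage-5599 describe pod hostpath-pxc-0
kubectl -n storage-5599 logs hostpath-pxc-0 -c pxc-init --previous
# give the init container more headroom and let the StatefulSet retry
kubectl -n storage-5599 patch pxc hostpath --type=merge \
    -p '{"spec":{"initContainer":{"resources":{"requests":{"memory":"100M"},"limits":{"memory":"200M"}}}}}'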