Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/logs/smart-update1-8-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + API=pxc.percona.com/v9-9-9 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + CLUSTER=smart-update + CLUSTER_SIZE=3 + PROXY_SIZE=2 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + PXC_VER=8.0 + TARGET_IMAGE_PXC_VS=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + VS_URL=http://version-service + VS_PORT=11000 + VS_ENDPOINT=http://version-service:11000 + main + create_infra smart-update1-26886 + local ns=smart-update1-26886 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n smart-update1-26078 smart-update --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/smart-update patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.mRLRINDiqt ++ mktemp + local LAST_ERR=/tmp/tmp.gzwhOJ2f18 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mRLRINDiqt perconaxtradbcluster.pxc.percona.com "smart-update" deleted + cat /tmp/tmp.gzwhOJ2f18 + rm /tmp/tmp.mRLRINDiqt /tmp/tmp.gzwhOJ2f18 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.LIW7mWZETD ++ mktemp + local LAST_ERR=/tmp/tmp.vVXvIr4FsJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LIW7mWZETD No resources found + cat /tmp/tmp.vVXvIr4FsJ + rm /tmp/tmp.LIW7mWZETD /tmp/tmp.vVXvIr4FsJ + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.3ikHbbHDDE ++ mktemp + local LAST_ERR=/tmp/tmp.tqVTNsYCEE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3ikHbbHDDE No resources found + cat /tmp/tmp.tqVTNsYCEE + rm /tmp/tmp.3ikHbbHDDE /tmp/tmp.tqVTNsYCEE + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: 
resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.lPnHMyRJrr + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_ERR=/tmp/tmp.KNe1xK68Tk + local exit_status=0 + kubectl_bin get ns ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.UOa1x54kES ++ mktemp + local LAST_ERR=/tmp/tmp.FFRHy4P7ot + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + awk '{print$1}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UOa1x54kES + cat /tmp/tmp.FFRHy4P7ot + rm /tmp/tmp.UOa1x54kES /tmp/tmp.FFRHy4P7ot + return 0 namespace "cert-manager" deleted namespace "smart-update1-26078" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lPnHMyRJrr namespace "pxc-operator" deleted + cat /tmp/tmp.KNe1xK68Tk + rm /tmp/tmp.lPnHMyRJrr /tmp/tmp.KNe1xK68Tk + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2KBY0a3jEt ++ mktemp + local LAST_ERR=/tmp/tmp.jxzzQaL28T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2KBY0a3jEt namespace/pxc-operator created + cat /tmp/tmp.jxzzQaL28T + rm /tmp/tmp.2KBY0a3jEt /tmp/tmp.jxzzQaL28T + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Lnrl6coYB +++ mktemp ++ local LAST_ERR=/tmp/tmp.H6wUMDphwH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2Lnrl6coYB ++ cat /tmp/tmp.H6wUMDphwH ++ rm /tmp/tmp.2Lnrl6coYB 
/tmp/tmp.H6wUMDphwH ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Hyk13E1Wm7 ++ mktemp + local LAST_ERR=/tmp/tmp.n7ZoINVrnX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Hyk13E1Wm7 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2" modified. + cat /tmp/tmp.n7ZoINVrnX + rm /tmp/tmp.Hyk13E1Wm7 /tmp/tmp.n7ZoINVrnX + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.MWe5dyweZ7 ++ mktemp + local LAST_ERR=/tmp/tmp.wFpp2sUXeH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MWe5dyweZ7 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.wFpp2sUXeH + rm /tmp/tmp.MWe5dyweZ7 /tmp/tmp.wFpp2sUXeH + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.VawiLl40oZ ++ mktemp + local LAST_ERR=/tmp/tmp.6XcLkcavQU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VawiLl40oZ clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.6XcLkcavQU + rm /tmp/tmp.VawiLl40oZ /tmp/tmp.6XcLkcavQU + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1732-9c5a0688^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - ++ mktemp + local LAST_OUT=/tmp/tmp.T2Bym2iD4f + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - ++ mktemp + local LAST_ERR=/tmp/tmp.3WxHq1cEGJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.T2Bym2iD4f 
deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.3WxHq1cEGJ + rm /tmp/tmp.T2Bym2iD4f /tmp/tmp.3WxHq1cEGJ + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.OpQrkS1WGW ++ mktemp + local LAST_ERR=/tmp/tmp.9xFtKDpmUj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OpQrkS1WGW pod/percona-xtradb-cluster-operator-bb65db757-kjkw8 condition met + cat /tmp/tmp.9xFtKDpmUj + rm /tmp/tmp.OpQrkS1WGW /tmp/tmp.9xFtKDpmUj + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.U4SWpxtq0I +++ mktemp ++ local LAST_ERR=/tmp/tmp.sI65wPH5Hv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U4SWpxtq0I ++ cat /tmp/tmp.sI65wPH5Hv ++ rm /tmp/tmp.U4SWpxtq0I /tmp/tmp.sI65wPH5Hv ++ return 0 + wait_pod percona-xtradb-cluster-operator-bb65db757-kjkw8 480 pxc-operator + local pod=percona-xtradb-cluster-operator-bb65db757-kjkw8 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-bb65db757-kjkw8 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-bb65db757-kjkw8 condition met percona-xtradb-cluster-operator-bb65db757-kjkw8.Ok + sleep 3 + create_namespace smart-update1-26886 + local namespace=smart-update1-26886 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep 
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces smart-update1-26886' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces smart-update1-26886 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace smart-update1-26886 + xargs kubectl delete ns ++ mktemp + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.tgRJsdkBhM ++ mktemp + local LAST_ERR=/tmp/tmp.3RvXFAKhKp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_OUT=/tmp/tmp.IMmhu4Rm54 ++ mktemp + local LAST_ERR=/tmp/tmp.Z9BuzWvTpN + local exit_status=0 ++ seq 0 2 + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-26886 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-26886 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tgRJsdkBhM + cat /tmp/tmp.3RvXFAKhKp + rm /tmp/tmp.tgRJsdkBhM /tmp/tmp.3RvXFAKhKp + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-26886 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.IMmhu4Rm54 + cat /tmp/tmp.Z9BuzWvTpN Error from server (NotFound): namespaces "smart-update1-26886" not found + rm /tmp/tmp.IMmhu4Rm54 /tmp/tmp.Z9BuzWvTpN + return 1 + : + wait_for_delete namespace/smart-update1-26886 + local res=namespace/smart-update1-26886 + echo -n 'namespace/smart-update1-26886 - ' namespace/smart-update1-26886 - + set +o xtrace Error from server (NotFound): namespaces "smart-update1-26886" not found + desc 'create namespace smart-update1-26886' + set +o xtrace ----------------------------------------------------------------------------------- create namespace smart-update1-26886 ----------------------------------------------------------------------------------- + kubectl_bin create namespace smart-update1-26886 ++ mktemp + local LAST_OUT=/tmp/tmp.89KxrnCuF8 ++ mktemp + local LAST_ERR=/tmp/tmp.b67y32mYzi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace smart-update1-26886 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.89KxrnCuF8 namespace/smart-update1-26886 created + cat /tmp/tmp.b67y32mYzi + rm /tmp/tmp.89KxrnCuF8 /tmp/tmp.b67y32mYzi + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.q11okYq6O4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2kI5uv7K6t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat 
/tmp/tmp.q11okYq6O4 ++ cat /tmp/tmp.2kI5uv7K6t ++ rm /tmp/tmp.q11okYq6O4 /tmp/tmp.2kI5uv7K6t ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2 --namespace=smart-update1-26886 ++ mktemp + local LAST_OUT=/tmp/tmp.FSWQaQEnZ4 ++ mktemp + local LAST_ERR=/tmp/tmp.9bR6p4D9kM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2 --namespace=smart-update1-26886 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FSWQaQEnZ4 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1732-9c5a0688-1-cluster2" modified. + cat /tmp/tmp.9bR6p4D9kM + rm /tmp/tmp.FSWQaQEnZ4 /tmp/tmp.9bR6p4D9kM + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gyiJCRjnj3 ++ mktemp + local LAST_ERR=/tmp/tmp.S6iethZa7w + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gyiJCRjnj3 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.S6iethZa7w + rm /tmp/tmp.gyiJCRjnj3 /tmp/tmp.S6iethZa7w + return 0 + deploy_version_service + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.c84pgrKbMn ++ mktemp + local LAST_ERR=/tmp/tmp.8z8nuTiS80 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c84pgrKbMn configmap/versions created + cat /tmp/tmp.8z8nuTiS80 + rm /tmp/tmp.c84pgrKbMn /tmp/tmp.8z8nuTiS80 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.2R0v9WQ0mi ++ mktemp + local LAST_ERR=/tmp/tmp.JW1yNkARcV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2R0v9WQ0mi deployment.apps/version-service 
created service/version-service created + cat /tmp/tmp.JW1yNkARcV + rm /tmp/tmp.2R0v9WQ0mi /tmp/tmp.JW1yNkARcV + return 0 + sleep 10 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.Zllsh78ZOf ++ mktemp + local LAST_ERR=/tmp/tmp.e1t2dKH2pD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Zllsh78ZOf namespace/cert-manager created + cat /tmp/tmp.e1t2dKH2pD + rm /tmp/tmp.Zllsh78ZOf /tmp/tmp.e1t2dKH2pD + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.lte5nVEv0s ++ mktemp + local LAST_ERR=/tmp/tmp.v1edINF4Ju + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lte5nVEv0s namespace/cert-manager labeled + cat /tmp/tmp.v1edINF4Ju + rm /tmp/tmp.lte5nVEv0s /tmp/tmp.v1edINF4Ju + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.v4ekTS1Zwu ++ mktemp + local LAST_ERR=/tmp/tmp.s1DkKiFPX3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.v4ekTS1Zwu namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews 
unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.s1DkKiFPX3 Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
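Note: every kubectl_bin invocation in this log follows the same retry pattern visible in the trace: redirect stdout/stderr to mktemp files, attempt the kubectl command up to three times (seq 0 2), then cat and remove the temp files and return the last exit status. A minimal sketch of that wrapper, reconstructed from the trace alone (the function body is never printed in this log, so the exact implementation is an assumption):

kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		[ "$exit_status" != 0 ] || break   # success: stop retrying
		sleep 0                            # the trace shows no back-off between attempts
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return $exit_status
}

When all three attempts fail, the caller usually tolerates it explicitly, which is why a "+ return 1" in this log is often followed by "+ :" or "+ true" rather than an abort.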
+ rm /tmp/tmp.v4ekTS1Zwu /tmp/tmp.s1DkKiFPX3 + return 0 + '[' '' == 4.10 ']' + sleep 70 ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ tail -n3 ++ grep :8.0 ++ sort -V ++ head -n1 +++ get_operator_pod +++ local label_prefix=app.kubernetes.io/ ++++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++++ grep -c percona-xtradb-cluster-operator +++ local check_label=1 +++ [[ 1 -eq 0 ]] +++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cWYPdE2V1n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OgMbrZ7R9r +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.cWYPdE2V1n +++ cat /tmp/tmp.OgMbrZ7R9r +++ rm /tmp/tmp.cWYPdE2V1n /tmp/tmp.OgMbrZ7R9r +++ return 0 ++ kubectl_bin exec -ti percona-xtradb-cluster-operator-bb65db757-kjkw8 -n pxc-operator -- curl -s http://version-service.smart-update1-26886.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 +++ mktemp ++ local LAST_OUT=/tmp/tmp.C5Y69KkHpE +++ mktemp ++ local LAST_ERR=/tmp/tmp.9RgsvtAFWQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -ti percona-xtradb-cluster-operator-bb65db757-kjkw8 -n pxc-operator -- curl -s http://version-service.smart-update1-26886.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.C5Y69KkHpE ++ cat /tmp/tmp.9RgsvtAFWQ Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.C5Y69KkHpE /tmp/tmp.9RgsvtAFWQ ++ return 0 + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.19-10.1 + desc 'patch crd' + set +o xtrace ----------------------------------------------------------------------------------- patch crd ----------------------------------------------------------------------------------- + kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.mtzO31iuzh ++ mktemp + local LAST_ERR=/tmp/tmp.SGmPbLhWTF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mtzO31iuzh customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com patched + cat /tmp/tmp.SGmPbLhWTF + rm /tmp/tmp.mtzO31iuzh /tmp/tmp.SGmPbLhWTF + return 0 + desc 'Updating ProxySQL PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- 
Updating ProxySQL PXC cluster ----------------------------------------------------------------------------------- + cp -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/conf/smart-update.yml /tmp/tmp.pbd6iPrXdE/smart-update.yml + yq -i eval '.spec.initContainer.image = "perconalab/percona-xtradb-cluster-operator:PR-1732-9c5a0688"' /tmp/tmp.pbd6iPrXdE/smart-update.yml + spinup_pxc smart-update /tmp/tmp.pbd6iPrXdE/smart-update.yml + local cluster=smart-update + local config=/tmp/tmp.pbd6iPrXdE/smart-update.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fQBWj806Cv ++ mktemp + local LAST_ERR=/tmp/tmp.YykmOoK2kv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fQBWj806Cv secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.YykmOoK2kv + rm /tmp/tmp.fQBWj806Cv /tmp/tmp.YykmOoK2kv + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + local LAST_OUT=/tmp/tmp.eTGUFcLPU6 + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1732-9c5a0688#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update1-26886~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.6ec4gOJgfI + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eTGUFcLPU6 deployment.apps/pxc-client created + cat /tmp/tmp.6ec4gOJgfI + rm /tmp/tmp.eTGUFcLPU6 /tmp/tmp.6ec4gOJgfI + return 0 + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 5\.7 ]] + apply_config /tmp/tmp.pbd6iPrXdE/smart-update.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config 
/tmp/tmp.pbd6iPrXdE/smart-update.yml + cat /tmp/tmp.pbd6iPrXdE/smart-update.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update1-26886~ + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + local LAST_OUT=/tmp/tmp.WAOpidIxUr + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1732-9c5a0688#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + local LAST_ERR=/tmp/tmp.DMrs6eE2vW + local exit_status=0 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WAOpidIxUr perconaxtradbcluster.pxc.percona.com/smart-update created + cat /tmp/tmp.DMrs6eE2vW + rm /tmp/tmp.WAOpidIxUr /tmp/tmp.DMrs6eE2vW + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy smart-update ++ local target_cluster=smart-update +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uIEje4IFKk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rEA64ZvMcU +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uIEje4IFKk +++ cat /tmp/tmp.rEA64ZvMcU +++ rm /tmp/tmp.uIEje4IFKk /tmp/tmp.rEA64ZvMcU +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fb0qoJJn55 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IRaEa59nPA +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.fb0qoJJn55 +++ cat /tmp/tmp.IRaEa59nPA +++ rm /tmp/tmp.fb0qoJJn55 /tmp/tmp.IRaEa59nPA +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo smart-update-proxysql ++ return + local proxy=smart-update-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-26886 ++ mktemp + local LAST_OUT=/tmp/tmp.CaZRw5I3x0 ++ mktemp + local LAST_ERR=/tmp/tmp.sS5b60KkNd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-26886 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' 
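Note: the kubectl wait above selects monitoring pods (app.kubernetes.io/instance=monitoring). This test deploys no monitoring, so all three attempts fail with "error: no matching resources found"; the harness tolerates that (the "+ true" right after "+ return 1" below) and moves on to wait_for_running for the ProxySQL and PXC pods. The tolerated call amounts to something like the following sketch (the harness source itself is not shown in this log):

kubectl_bin wait --for=condition=Ready pod \
	-l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator \
	--timeout=300s -n smart-update1-26886 || true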
+ sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-26886 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-26886 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.CaZRw5I3x0 + cat /tmp/tmp.sS5b60KkNd error: no matching resources found + rm /tmp/tmp.CaZRw5I3x0 /tmp/tmp.sS5b60KkNd + return 1 + true + wait_for_running smart-update-proxysql 1 + local name=smart-update-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-proxysql-0 480 + local pod=smart-update-proxysql-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo smart-update-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=proxysql + set +o xtrace pod/smart-update-proxysql-0 condition met smart-update-proxysql-0.Ok + wait_for_running smart-update-pxc 3 + local name=smart-update-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-0 480 + local pod=smart-update-pxc-0 + local max_retry=480 + local ns= ++ echo smart-update-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-0 condition met smart-update-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-1 480 + local pod=smart-update-pxc-1 + local max_retry=480 + local ns= ++ echo smart-update-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-1 condition met smart-update-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-2 480 + local pod=smart-update-pxc-2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo smart-update-pxc-2 + local container=pxc + set +o xtrace pod/smart-update-pxc-2 condition met smart-update-pxc-2.Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h smart-update-proxysql -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h smart-update-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BEbB2i1C4O +++ mktemp ++ local LAST_ERR=/tmp/tmp.6CYz6raHij ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BEbB2i1C4O ++ cat /tmp/tmp.6CYz6raHij ++ rm /tmp/tmp.BEbB2i1C4O /tmp/tmp.6CYz6raHij ++ return 0 + client_pod=pxc-client-6644d8898f-c8cvc + wait_pod pxc-client-6644d8898f-c8cvc + local pod=pxc-client-6644d8898f-c8cvc + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-c8cvc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-c8cvc condition met pxc-client-6644d8898f-c8cvc.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h smart-update-proxysql -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h smart-update-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oEmk5xPWYK +++ mktemp ++ local LAST_ERR=/tmp/tmp.mMmSCqxsES ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oEmk5xPWYK ++ cat /tmp/tmp.mMmSCqxsES ++ rm /tmp/tmp.oEmk5xPWYK /tmp/tmp.mMmSCqxsES ++ return 0 + client_pod=pxc-client-6644d8898f-c8cvc + wait_pod pxc-client-6644d8898f-c8cvc + local pod=pxc-client-6644d8898f-c8cvc + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-6644d8898f-c8cvc + local container= + set +o xtrace pod/pxc-client-6644d8898f-c8cvc condition met pxc-client-6644d8898f-c8cvc.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qwFFFtziRT +++ mktemp ++ local LAST_ERR=/tmp/tmp.XqhtGYFPMb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qwFFFtziRT ++ cat /tmp/tmp.XqhtGYFPMb ++ rm /tmp/tmp.qwFFFtziRT /tmp/tmp.XqhtGYFPMb ++ return 0 + client_pod=pxc-client-6644d8898f-c8cvc + wait_pod pxc-client-6644d8898f-c8cvc + local 
pod=pxc-client-6644d8898f-c8cvc + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-6644d8898f-c8cvc + local container= + set +o xtrace pod/pxc-client-6644d8898f-c8cvc condition met pxc-client-6644d8898f-c8cvc.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.pbd6iPrXdE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql /tmp/tmp.pbd6iPrXdE/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AU88L9rAFt +++ mktemp ++ local LAST_ERR=/tmp/tmp.NaPShhpNPK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AU88L9rAFt ++ cat /tmp/tmp.NaPShhpNPK ++ rm /tmp/tmp.AU88L9rAFt /tmp/tmp.NaPShhpNPK ++ return 0 + client_pod=pxc-client-6644d8898f-c8cvc + wait_pod pxc-client-6644d8898f-c8cvc + local pod=pxc-client-6644d8898f-c8cvc + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-c8cvc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-c8cvc condition met pxc-client-6644d8898f-c8cvc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.pbd6iPrXdE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql /tmp/tmp.pbd6iPrXdE/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I6U2HeIcww +++ mktemp ++ local LAST_ERR=/tmp/tmp.srAJ0IL1X6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I6U2HeIcww ++ cat /tmp/tmp.srAJ0IL1X6 ++ rm /tmp/tmp.I6U2HeIcww /tmp/tmp.srAJ0IL1X6 ++ return 0 + client_pod=pxc-client-6644d8898f-c8cvc + wait_pod pxc-client-6644d8898f-c8cvc + local pod=pxc-client-6644d8898f-c8cvc + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-c8cvc ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-c8cvc condition met pxc-client-6644d8898f-c8cvc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.pbd6iPrXdE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1732/e2e-tests/smart-update1/compare/select-1.sql /tmp/tmp.pbd6iPrXdE/select-1.sql ++ is_keyring_plugin_in_use smart-update ++ local cluster=smart-update ++ kubectl_bin exec -it smart-update-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E5Kiacr6JK +++ mktemp ++ local LAST_ERR=/tmp/tmp.oBh6HWsnqK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it smart-update-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.E5Kiacr6JK ++ cat /tmp/tmp.oBh6HWsnqK Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.E5Kiacr6JK /tmp/tmp.oBh6HWsnqK ++ return 0 + '[' '' ']' +++ get_proxy smart-update +++ local target_cluster=smart-update ++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XmEvrgaL9k +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.zQBpK4tnwx ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.XmEvrgaL9k ++++ cat /tmp/tmp.zQBpK4tnwx ++++ rm /tmp/tmp.XmEvrgaL9k /tmp/tmp.zQBpK4tnwx ++++ return 0 +++ [[ '' == \t\r\u\e ]] ++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.w4y7BWnuJQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qulonVT4H9 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.w4y7BWnuJQ ++++ cat /tmp/tmp.qulonVT4H9 ++++ rm /tmp/tmp.w4y7BWnuJQ /tmp/tmp.qulonVT4H9 ++++ return 0 +++ [[ true == \t\r\u\e ]] +++ echo smart-update-proxysql +++ return ++ get_proxy_primary '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' smart-update-proxysql-0 ++ local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' ++ local pod=smart-update-proxysql-0 +++ run_mysql_local 'SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='\''ONLINE'\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' smart-update-proxysql-0 proxysql +++ local 'command=SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='\''ONLINE'\'';' +++ local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' +++ local pod=smart-update-proxysql-0 +++ local container_name=proxysql +++ set +o xtrace ++ local ip=smart-update-pxc-0.smart-update-pxc.smart-update1-26886.svc.cluster.local +++ wc -l +++ echo smart-update-pxc-0.smart-update-pxc.smart-update1-26886.svc.cluster.local ++ '[' 1 '!=' 1 ']' ++ cut -d. 
-f1 ++ echo smart-update-pxc-0.smart-update-pxc.smart-update1-26886.svc.cluster.local + initial_primary=smart-update-pxc-0 + kubectl_bin patch pxc/smart-update --type=merge -p '{"spec":{"pxc":{"image":"perconalab/percona-xtradb-cluster-operator:main-pxc8.0"}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.XeqJ5NLqVS ++ mktemp + local LAST_ERR=/tmp/tmp.YUS8pj4AaI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc/smart-update --type=merge -p '{"spec":{"pxc":{"image":"perconalab/percona-xtradb-cluster-operator:main-pxc8.0"}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XeqJ5NLqVS perconaxtradbcluster.pxc.percona.com/smart-update patched + cat /tmp/tmp.YUS8pj4AaI + rm /tmp/tmp.XeqJ5NLqVS /tmp/tmp.YUS8pj4AaI + return 0 + sleep 7 + desc 'check last pod to update' + set +o xtrace ----------------------------------------------------------------------------------- check last pod to update ----------------------------------------------------------------------------------- + check_last_pod_to_update smart-update smart-update-pxc-0 3 perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + local cluster=smart-update + local initial_primary=smart-update-pxc-0 + local pxc_size=3 + local target_image=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + set +x Waiting for the last pod to update.................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................Something went wrong waiting for the last pod to update!
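Note: this is where the run fails. The harness recorded the initial ProxySQL primary (smart-update-pxc-0), patched spec.pxc.image from percona/percona-xtradb-cluster:8.0.19-10.1 to perconalab/percona-xtradb-cluster-operator:main-pxc8.0, and then check_last_pod_to_update waited for the rolling update to reach that initial primary last; the run of dots is its polling output, and the wait gave up with "Something went wrong waiting for the last pod to update!". A quick way to see how far the rollout got is to compare the pxc container image on each pod against the target (a diagnostic sketch, not part of the test itself; the namespace and pod names are taken from this log):

for p in smart-update-pxc-0 smart-update-pxc-1 smart-update-pxc-2; do
	echo -n "$p "
	kubectl -n smart-update1-26886 get pod "$p" \
		-o jsonpath='{.spec.containers[?(@.name=="pxc")].image}'
	echo
done

If no pod reports the target image, the operator never started the rollout (check the operator pod log in the pxc-operator namespace); if only the initial primary is still on percona/percona-xtradb-cluster:8.0.19-10.1, the rollout stalled on exactly the pod check_last_pod_to_update was waiting for.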