Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/logs/smart-update2-8-0.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + API=pxc.percona.com/v9-9-9 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + CLUSTER=smart-update + CLUSTER_SIZE=3 + PROXY_SIZE=2 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 + TARGET_IMAGE_PXC_VS=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + VS_URL=http://version-service + VS_PORT=11000 + VS_ENDPOINT=http://version-service:11000 + [[ 8.0 == \8\.\4 ]] + VS_UPDATE_STRATEGY=recommended + main + create_infra smart-update2-11941 + local ns=smart-update2-11941 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n smart-update2-21469 minimal-cluster --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/minimal-cluster patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.jeE33FKCqm ++ mktemp + local LAST_ERR=/tmp/tmp.H1slJWphJ1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jeE33FKCqm perconaxtradbcluster.pxc.percona.com "minimal-cluster" deleted from smart-update2-21469 namespace + cat /tmp/tmp.H1slJWphJ1 + rm /tmp/tmp.jeE33FKCqm /tmp/tmp.H1slJWphJ1 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.N6mzwf4MEk ++ mktemp + local LAST_ERR=/tmp/tmp.Ih0lutNGgi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.N6mzwf4MEk No resources found + cat /tmp/tmp.Ih0lutNGgi + rm /tmp/tmp.N6mzwf4MEk /tmp/tmp.Ih0lutNGgi + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.Yvol5OkhA9 ++ mktemp + local LAST_ERR=/tmp/tmp.ChIGPKHA9V + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Yvol5OkhA9 No resources found + cat /tmp/tmp.ChIGPKHA9V + rm /tmp/tmp.Yvol5OkhA9 /tmp/tmp.ChIGPKHA9V + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get 
ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + awk '{print$1}' + kubectl_bin get ns ++ mktemp + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.9djxopxwXI + local LAST_OUT=/tmp/tmp.6dJ4IDVr3h ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.54KEqreeSy + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.qWG0qaDXU7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9djxopxwXI + cat /tmp/tmp.qWG0qaDXU7 + rm /tmp/tmp.9djxopxwXI /tmp/tmp.qWG0qaDXU7 + return 0 namespace "cert-manager" deleted namespace "smart-update2-21469" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6dJ4IDVr3h namespace "pxc-operator" deleted + cat /tmp/tmp.54KEqreeSy + rm /tmp/tmp.6dJ4IDVr3h /tmp/tmp.54KEqreeSy + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yEOl5pmtZb ++ mktemp + local LAST_ERR=/tmp/tmp.E2BfHeMRGS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yEOl5pmtZb namespace/pxc-operator created + cat /tmp/tmp.E2BfHeMRGS + rm /tmp/tmp.yEOl5pmtZb /tmp/tmp.E2BfHeMRGS + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.fBqfcBIj57 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ak0SkPzwkw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ 
break ++ cat /tmp/tmp.fBqfcBIj57 ++ cat /tmp/tmp.ak0SkPzwkw ++ rm /tmp/tmp.fBqfcBIj57 /tmp/tmp.ak0SkPzwkw ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bNFd8BtcdH ++ mktemp + local LAST_ERR=/tmp/tmp.1Vo7V9S3az + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bNFd8BtcdH Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5" modified. + cat /tmp/tmp.1Vo7V9S3az + rm /tmp/tmp.bNFd8BtcdH /tmp/tmp.1Vo7V9S3az + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.oEIwRtvVkr ++ mktemp + local LAST_ERR=/tmp/tmp.sTKVX8tRQv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oEIwRtvVkr customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.sTKVX8tRQv + rm /tmp/tmp.oEIwRtvVkr /tmp/tmp.sTKVX8tRQv + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jYO0X69Dc9 ++ mktemp + local LAST_ERR=/tmp/tmp.Ndp9hpYT9v + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jYO0X69Dc9 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.Ndp9hpYT9v + rm /tmp/tmp.jYO0X69Dc9 /tmp/tmp.Ndp9hpYT9v + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/cw-operator.yaml + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = 
""' - ++ mktemp + local LAST_OUT=/tmp/tmp.FFl6BCHqrP ++ mktemp + local LAST_ERR=/tmp/tmp.HQKABF3O1e + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FFl6BCHqrP deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.HQKABF3O1e + rm /tmp/tmp.FFl6BCHqrP /tmp/tmp.HQKABF3O1e + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.YdNM3qYWCC ++ mktemp + local LAST_ERR=/tmp/tmp.7a4Gsggfbx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YdNM3qYWCC pod/percona-xtradb-cluster-operator-7c94dbdc94-xfmrd condition met + cat /tmp/tmp.7a4Gsggfbx + rm /tmp/tmp.YdNM3qYWCC /tmp/tmp.7a4Gsggfbx + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.39GOIopaFi +++ mktemp ++ local LAST_ERR=/tmp/tmp.FDCHjkjaUE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.39GOIopaFi ++ cat /tmp/tmp.FDCHjkjaUE ++ rm /tmp/tmp.39GOIopaFi /tmp/tmp.FDCHjkjaUE ++ return 0 + wait_pod percona-xtradb-cluster-operator-7c94dbdc94-xfmrd 480 pxc-operator + local pod=percona-xtradb-cluster-operator-7c94dbdc94-xfmrd + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-7c94dbdc94-xfmrd ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-7c94dbdc94-xfmrd condition met waiting for pod/percona-xtradb-cluster-operator-7c94dbdc94-xfmrd to become Ready.Ok + sleep 3 + create_namespace smart-update2-11941 + local namespace=smart-update2-11941 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ 
grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces smart-update2-11941' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces smart-update2-11941 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.3NuPFY4tbx + awk '{print$1}' ++ mktemp + local LAST_ERR=/tmp/tmp.gFUsocrYIS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update2-11941 + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.r48qbo7czM ++ mktemp + local LAST_ERR=/tmp/tmp.Oio7q1kaJS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update2-11941 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.r48qbo7czM + cat /tmp/tmp.Oio7q1kaJS + rm /tmp/tmp.r48qbo7czM /tmp/tmp.Oio7q1kaJS + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.3NuPFY4tbx + cat /tmp/tmp.gFUsocrYIS Error from server (NotFound): namespaces "smart-update2-11941" not found + rm /tmp/tmp.3NuPFY4tbx /tmp/tmp.gFUsocrYIS + return 1 + : + wait_for_delete namespace/smart-update2-11941 + local res=namespace/smart-update2-11941 + echo -n 'waiting for namespace/smart-update2-11941 to be deleted' waiting for namespace/smart-update2-11941 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "smart-update2-11941" not found + desc 'create namespace smart-update2-11941' + set +o xtrace ----------------------------------------------------------------------------------- create namespace smart-update2-11941 ----------------------------------------------------------------------------------- + kubectl_bin create namespace smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.jBV9q7q0Cy ++ mktemp + local LAST_ERR=/tmp/tmp.7tCgNpTrjd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace smart-update2-11941 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jBV9q7q0Cy 
namespace/smart-update2-11941 created + cat /tmp/tmp.7tCgNpTrjd + rm /tmp/tmp.jBV9q7q0Cy /tmp/tmp.7tCgNpTrjd + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.iDgP7cG2IQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.T9WMckbVGD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iDgP7cG2IQ ++ cat /tmp/tmp.T9WMckbVGD ++ rm /tmp/tmp.iDgP7cG2IQ /tmp/tmp.T9WMckbVGD ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5 --namespace=smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.ywDVGq5B7n ++ mktemp + local LAST_ERR=/tmp/tmp.koJ0o3uyUH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5 --namespace=smart-update2-11941 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ywDVGq5B7n Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2349-b5e2b8a7-1-cluster5" modified. + cat /tmp/tmp.koJ0o3uyUH + rm /tmp/tmp.ywDVGq5B7n /tmp/tmp.koJ0o3uyUH + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9MVAEhclHX ++ mktemp + local LAST_ERR=/tmp/tmp.nRlkEnjTjQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9MVAEhclHX secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.nRlkEnjTjQ + rm /tmp/tmp.9MVAEhclHX /tmp/tmp.nRlkEnjTjQ + return 0 + deploy_version_service + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.OkyFYnz6yD ++ mktemp + local LAST_ERR=/tmp/tmp.XhsxFEDWL7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OkyFYnz6yD configmap/versions created + cat /tmp/tmp.XhsxFEDWL7 + rm /tmp/tmp.OkyFYnz6yD /tmp/tmp.XhsxFEDWL7 + return 0 + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.FZ3zYjY76Z ++ mktemp + local LAST_ERR=/tmp/tmp.aS8UZ59wu7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FZ3zYjY76Z deployment.apps/version-service created service/version-service created + cat /tmp/tmp.aS8UZ59wu7 + rm /tmp/tmp.FZ3zYjY76Z /tmp/tmp.aS8UZ59wu7 + return 0 + sleep 10 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.5fvdkOkgNI ++ mktemp + local LAST_ERR=/tmp/tmp.5xSlb5ONG8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5fvdkOkgNI namespace/cert-manager created + cat /tmp/tmp.5xSlb5ONG8 + rm /tmp/tmp.5fvdkOkgNI /tmp/tmp.5xSlb5ONG8 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.AfCKf2LWIZ ++ mktemp + local LAST_ERR=/tmp/tmp.WxewJx3tGV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AfCKf2LWIZ namespace/cert-manager labeled + cat /tmp/tmp.WxewJx3tGV + rm /tmp/tmp.AfCKf2LWIZ /tmp/tmp.WxewJx3tGV + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.9ApLS3iMAC ++ mktemp + local LAST_ERR=/tmp/tmp.gHEmej980v + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9ApLS3iMAC namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.gHEmej980v Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
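
Nearly every kubectl invocation in this trace goes through a kubectl_bin wrapper: stdout and stderr are captured into mktemp files and the command is retried up to three times (`seq 0 2`) before the helper gives up. A minimal sketch of that pattern, reconstructed from the trace alone (the real helper is defined in the repo's e2e-tests scripts; the exact body below is an assumption):

kubectl_bin() {
    # Inferred from the trace: capture output, retry up to 3 times, replay output.
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep "$i"   # the trace shows 'sleep 0' after the first failed attempt
            continue
        fi
        break
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

This is why a failing call (for example, the smart-update2-11941 namespace deletion later in this log) prints the same command several times before returning 1.
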
+ rm /tmp/tmp.9ApLS3iMAC /tmp/tmp.gHEmej980v + return 0 + '[' '' == 4.10 ']' + sleep 70 +++ get_operator_pod +++ local label_prefix=app.kubernetes.io/ ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ head -n1 ++ tail -n3 ++++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++++ grep -c percona-xtradb-cluster-operator ++ grep :8.0 ++ sort -V +++ local check_label=1 +++ [[ 1 -eq 0 ]] +++ head -1 +++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' +++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++++ mktemp +++ local LAST_OUT=/tmp/tmp.onNdAL4NDu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pvIZhafmME +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.onNdAL4NDu +++ cat /tmp/tmp.pvIZhafmME +++ rm /tmp/tmp.onNdAL4NDu /tmp/tmp.pvIZhafmME +++ return 0 ++ kubectl_bin exec -ti percona-xtradb-cluster-operator-7c94dbdc94-xfmrd -n pxc-operator -- curl -s http://version-service.smart-update2-11941.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 +++ mktemp ++ local LAST_OUT=/tmp/tmp.B6EyRD7WsK +++ mktemp ++ local LAST_ERR=/tmp/tmp.3gIRetvL0h ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -ti percona-xtradb-cluster-operator-7c94dbdc94-xfmrd -n pxc-operator -- curl -s http://version-service.smart-update2-11941.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.B6EyRD7WsK ++ cat /tmp/tmp.3gIRetvL0h Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.B6EyRD7WsK /tmp/tmp.3gIRetvL0h ++ return 0 + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.41-32.1 + kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.fbJyOSmEuA ++ mktemp + local LAST_ERR=/tmp/tmp.1Ti27NFhOO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fbJyOSmEuA customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com patched + cat /tmp/tmp.1Ti27NFhOO + rm /tmp/tmp.fbJyOSmEuA /tmp/tmp.1Ti27NFhOO + return 0 + kubectl_bin -n pxc-operator set env deploy/percona-xtradb-cluster-operator PERCONA_VS_FALLBACK_URI=http://version-service.smart-update2-11941.svc.cluster.local:11000 ++ mktemp + local LAST_OUT=/tmp/tmp.8Dq9ZvYW5z ++ mktemp + local 
LAST_ERR=/tmp/tmp.i6wuedOkDU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n pxc-operator set env deploy/percona-xtradb-cluster-operator PERCONA_VS_FALLBACK_URI=http://version-service.smart-update2-11941.svc.cluster.local:11000 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8Dq9ZvYW5z deployment.apps/percona-xtradb-cluster-operator env updated + cat /tmp/tmp.i6wuedOkDU + rm /tmp/tmp.8Dq9ZvYW5z /tmp/tmp.i6wuedOkDU + return 0 + desc 'Starting telemetry testing' + set +o xtrace ----------------------------------------------------------------------------------- Starting telemetry testing ----------------------------------------------------------------------------------- + /usr/bin/sed s/version-service/version-service-cr/g /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml ++ yq 'select(.kind == "Deployment").spec.template.spec.containers[0].image' /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.46Vf3vMLsT + yq eval '(. | select(.kind == "Deployment") | .spec.template.spec.containers[0].image) = "perconalab/version-service:main-latest"' ++ mktemp + local LAST_ERR=/tmp/tmp.0X7DV7eMiV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.46Vf3vMLsT deployment.apps/version-service-cr created service/version-service-cr created + cat /tmp/tmp.0X7DV7eMiV + rm /tmp/tmp.46Vf3vMLsT /tmp/tmp.0X7DV7eMiV + return 0 + kubectl_bin delete pod -l run=version-service ++ mktemp + local LAST_OUT=/tmp/tmp.ByY7k1Nwt0 ++ mktemp + local LAST_ERR=/tmp/tmp.F6TfbohIIk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ByY7k1Nwt0 pod "version-service-5676bdcbd9-7d7h2" deleted from smart-update2-11941 namespace + cat /tmp/tmp.F6TfbohIIk + rm /tmp/tmp.ByY7k1Nwt0 /tmp/tmp.F6TfbohIIk + return 0 ++ echo -n percona/percona-xtradb-cluster:8.0.41-32.1 ++ sed -r 's/^.*:([0-9]+.[0-9]+).*/\1/' + IMAGE_PREFIX=8.0 + desc 'Enable telemetry on operator level' + set +o xtrace ----------------------------------------------------------------------------------- Enable telemetry on operator level ----------------------------------------------------------------------------------- + yq '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"' + kubectl_bin apply -n pxc-operator -f - ++ mktemp + kubectl_bin get deployment/percona-xtradb-cluster-operator -o yaml -n pxc-operator + local LAST_OUT=/tmp/tmp.RBR8P7Bzha ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.E60MG5OjjT + local LAST_ERR=/tmp/tmp.cspmJw7GbV + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.7CVqGsUwSg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get deployment/percona-xtradb-cluster-operator -o yaml -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.E60MG5OjjT + cat /tmp/tmp.7CVqGsUwSg + rm /tmp/tmp.E60MG5OjjT /tmp/tmp.7CVqGsUwSg + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RBR8P7Bzha deployment.apps/percona-xtradb-cluster-operator configured + cat /tmp/tmp.cspmJw7GbV + rm /tmp/tmp.RBR8P7Bzha /tmp/tmp.cspmJw7GbV + return 0 + sleep 30 ++ get_operator_pod 
++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.QlcB20R3qJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.aRnTFbfsAP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QlcB20R3qJ ++ cat /tmp/tmp.aRnTFbfsAP ++ rm /tmp/tmp.QlcB20R3qJ /tmp/tmp.aRnTFbfsAP ++ return 0 + wait_pod percona-xtradb-cluster-operator-849598889-rgbml 480 pxc-operator + local pod=percona-xtradb-cluster-operator-849598889-rgbml + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-849598889-rgbml ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-849598889-rgbml condition met waiting for pod/percona-xtradb-cluster-operator-849598889-rgbml to become Ready.Ok + check_telemetry_transfer http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 disabled enabled + local cr_vs_uri=http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 + local cr_vs_channel=disabled + local telemetry_state=enabled + desc 'create PXC minimal cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC minimal cluster ----------------------------------------------------------------------------------- + cluster=minimal-cluster + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zQX1jSnTzZ ++ mktemp + local LAST_ERR=/tmp/tmp.JYVaA6fnfi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zQX1jSnTzZ deployment.apps/pxc-client created + cat /tmp/tmp.JYVaA6fnfi + rm /tmp/tmp.zQX1jSnTzZ /tmp/tmp.JYVaA6fnfi + return 0 + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + yq eval '(. 
| select(.metadata.name == "my-cluster-secrets") | .metadata.name) = "minimal-cluster"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CdA92nFO7P ++ mktemp + local LAST_ERR=/tmp/tmp.ICCKujlo5E + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CdA92nFO7P secret/minimal-cluster created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.ICCKujlo5E + rm /tmp/tmp.CdA92nFO7P /tmp/tmp.ICCKujlo5E + return 0 + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/cr-minimal.yaml + yq eval '.spec.upgradeOptions.versionServiceEndpoint="http://version-service-cr.smart-update2-11941.svc.cluster.local:11000"' + yq eval '.spec.upgradeOptions.apply="disabled"' + yq eval '.spec.logcollector.image="perconalab/fluentbit:main-logcollector"' + yq eval '.spec.pxc.image="percona/percona-xtradb-cluster:8.0.41-32.1"' + yq eval '.spec.crVersion="9.9.9"' + kubectl_bin apply -f - ++ mktemp + yq eval '.spec.haproxy.image="perconalab/percona-xtradb-cluster-operator:main-haproxy"' + yq eval '.spec.initContainer.image="perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7"' + local LAST_OUT=/tmp/tmp.gUGCQeFEXG ++ mktemp + local LAST_ERR=/tmp/tmp.zu9zmrHnSZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gUGCQeFEXG perconaxtradbcluster.pxc.percona.com/minimal-cluster created + cat /tmp/tmp.zu9zmrHnSZ + rm /tmp/tmp.gUGCQeFEXG /tmp/tmp.zu9zmrHnSZ + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-pxc 1 + local name=minimal-cluster-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-pxc-0 480 + local pod=minimal-cluster-pxc-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/minimal-cluster-pxc-0 condition met waiting for pod/minimal-cluster-pxc-0 to become Ready.Ok + sleep 20 + local proxy ++ get_proxy minimal-cluster ++ local target_cluster=minimal-cluster +++ kubectl_bin get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lrJwgtZa4n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OdREQK3Fyg +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.lrJwgtZa4n +++ cat /tmp/tmp.OdREQK3Fyg +++ rm /tmp/tmp.lrJwgtZa4n /tmp/tmp.OdREQK3Fyg +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo minimal-cluster-haproxy ++ return + proxy=minimal-cluster-haproxy + wait_for_running minimal-cluster-haproxy 1 + local name=minimal-cluster-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace 
----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-haproxy-0 480 + local pod=minimal-cluster-haproxy-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/minimal-cluster-haproxy-0 condition met waiting for pod/minimal-cluster-haproxy-0 to become Ready.Ok + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZDLmkCsTHA +++ mktemp ++ local LAST_ERR=/tmp/tmp.ajvrtjnrrM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZDLmkCsTHA ++ cat /tmp/tmp.ajvrtjnrrM ++ rm /tmp/tmp.ZDLmkCsTHA /tmp/tmp.ajvrtjnrrM ++ return 0 + client_pod=pxc-client-c75dc5c46-bszlv + wait_pod pxc-client-c75dc5c46-bszlv + local pod=pxc-client-c75dc5c46-bszlv + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-c75dc5c46-bszlv + local container= + set +o xtrace pod/pxc-client-c75dc5c46-bszlv condition met waiting for pod/pxc-client-c75dc5c46-bszlv to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. 
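
Both run_mysql calls in this test fail the same way: the connection-option string passed to the helper ends with a bare -P, so mysql exits with code 5 before the statement is executed (the exit-code line follows below). The fix on the caller's side is to give -P its port argument; a hedged sketch, assuming the standard MySQL port 3306 is what the test intends for the HAProxy service:

# Hypothetical corrected call: identical to the traced one except that
# -P now carries an explicit port instead of dangling at the end of the URI.
run_mysql \
    'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' \
    '-h minimal-cluster-haproxy -uroot -proot_password -P3306'

The telemetry checks further down proceed regardless, since they inspect the version-service logs rather than the result of the SQL.
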
command terminated with exit code 5 + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zXDYQw7MgU +++ mktemp ++ local LAST_ERR=/tmp/tmp.FPpMBXvSSn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zXDYQw7MgU ++ cat /tmp/tmp.FPpMBXvSSn ++ rm /tmp/tmp.zXDYQw7MgU /tmp/tmp.FPpMBXvSSn ++ return 0 + client_pod=pxc-client-c75dc5c46-bszlv + wait_pod pxc-client-c75dc5c46-bszlv + local pod=pxc-client-c75dc5c46-bszlv + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-bszlv ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-bszlv condition met waiting for pod/pxc-client-c75dc5c46-bszlv to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. command terminated with exit code 5 + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.time_ms")' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + kubectl_bin logs version-service-cr-9d9559c6f-c2kpq ++ mktemp + local LAST_OUT=/tmp/tmp.NqKSwkTR3S ++ mktemp + local LAST_ERR=/tmp/tmp.swpW2fd2v7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-cr-9d9559c6f-c2kpq + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NqKSwkTR3S + cat /tmp/tmp.swpW2fd2v7 + rm /tmp/tmp.NqKSwkTR3S /tmp/tmp.swpW2fd2v7 + return 0 + grep -Eo '\{.*\}' + grep -E 'server request payload|unary call' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.request.content".msg.customResourceUid)' + kubectl_bin logs version-service-5676bdcbd9-f8w6k ++ mktemp + local LAST_OUT=/tmp/tmp.RNiil2pu6b ++ mktemp + local LAST_ERR=/tmp/tmp.zInUKVrlbH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-5676bdcbd9-f8w6k + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RNiil2pu6b + cat /tmp/tmp.zInUKVrlbH + rm /tmp/tmp.RNiil2pu6b /tmp/tmp.zInUKVrlbH + return 0 + local telemetry_log_file=enabled_telemetry.version-service-cw.log.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == enabled ']' + desc 'operator fallback VS should have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- operator fallback VS should have telemetry 
----------------------------------------------------------------------------------- + diff /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/enabled_telemetry.version-service-cw.log.json /dev/fd/63 ++ grep -f /tmp/tmp.2CZMOzEkaK/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/enabled_telemetry.version-service-cw.log.json + desc 'CR VS should not have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- CR VS should not have telemetry ----------------------------------------------------------------------------------- + [[ -s /tmp/tmp.2CZMOzEkaK/enabled_telemetry.version-service-cr.log.json ]] + local image_prefix=disabled + image_prefix=disabled + local telemetry_cr_log_file=enabled_telemetry.version-service-cr-disabled-cw.log.json + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + [[ disabled == \d\i\s\a\b\l\e\d\-\r\e\c\o\m\m\e\n\d\e\d ]] + [[ disabled == \d\i\s\a\b\l\e\d\-\l\a\t\e\s\t ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == disabled ']' + kubectl_bin patch pxc minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.8uLG1BbN1k ++ mktemp + local LAST_ERR=/tmp/tmp.tDxpw43SUj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8uLG1BbN1k perconaxtradbcluster.pxc.percona.com/minimal-cluster patched + cat /tmp/tmp.tDxpw43SUj + rm /tmp/tmp.8uLG1BbN1k /tmp/tmp.tDxpw43SUj + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ head -1 ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BR9dUK2uFg +++ mktemp ++ local LAST_ERR=/tmp/tmp.rTAKQZRHFj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BR9dUK2uFg ++ cat /tmp/tmp.rTAKQZRHFj ++ rm /tmp/tmp.BR9dUK2uFg /tmp/tmp.rTAKQZRHFj ++ return 0 + kubectl_bin delete pod -n pxc-operator percona-xtradb-cluster-operator-849598889-rgbml ++ mktemp + local LAST_OUT=/tmp/tmp.lAzweJKrdM ++ mktemp + local LAST_ERR=/tmp/tmp.Bkxxj6SyF7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -n pxc-operator 
percona-xtradb-cluster-operator-849598889-rgbml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lAzweJKrdM pod "percona-xtradb-cluster-operator-849598889-rgbml" deleted from pxc-operator namespace + cat /tmp/tmp.Bkxxj6SyF7 + rm /tmp/tmp.lAzweJKrdM /tmp/tmp.Bkxxj6SyF7 + return 0 + kubectl_bin delete pxc --all ++ mktemp + local LAST_OUT=/tmp/tmp.OEqC9G2vt4 ++ mktemp + local LAST_ERR=/tmp/tmp.JpqDXN82N1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OEqC9G2vt4 perconaxtradbcluster.pxc.percona.com "minimal-cluster" deleted from smart-update2-11941 namespace + cat /tmp/tmp.JpqDXN82N1 + rm /tmp/tmp.OEqC9G2vt4 /tmp/tmp.JpqDXN82N1 + return 0 + kubectl_bin delete deploy pxc-client ++ mktemp + local LAST_OUT=/tmp/tmp.4oZXDYSk6K ++ mktemp + local LAST_ERR=/tmp/tmp.AX26Crqvh6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy pxc-client + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4oZXDYSk6K deployment.apps "pxc-client" deleted from smart-update2-11941 namespace + cat /tmp/tmp.AX26Crqvh6 + rm /tmp/tmp.4oZXDYSk6K /tmp/tmp.AX26Crqvh6 + return 0 + sleep 30 + desc 'Disabling telemetry on the operator level' + set +o xtrace ----------------------------------------------------------------------------------- Disabling telemetry on the operator level ----------------------------------------------------------------------------------- + kubectl_bin delete pod -l run=version-service-cr ++ mktemp + local LAST_OUT=/tmp/tmp.JXKMTP60b8 ++ mktemp + local LAST_ERR=/tmp/tmp.0BVgfXK2Q2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service-cr + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JXKMTP60b8 pod "version-service-cr-9d9559c6f-c2kpq" deleted from smart-update2-11941 namespace + cat /tmp/tmp.0BVgfXK2Q2 + rm /tmp/tmp.JXKMTP60b8 /tmp/tmp.0BVgfXK2Q2 + return 0 + kubectl_bin delete pod -l run=version-service ++ mktemp + local LAST_OUT=/tmp/tmp.ZWRgA3VuAB ++ mktemp + local LAST_ERR=/tmp/tmp.xV7Tpuqd5l + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZWRgA3VuAB pod "version-service-5676bdcbd9-f8w6k" deleted from smart-update2-11941 namespace + cat /tmp/tmp.xV7Tpuqd5l + rm /tmp/tmp.ZWRgA3VuAB /tmp/tmp.xV7Tpuqd5l + return 0 + kubectl_bin get deployment/percona-xtradb-cluster-operator -o yaml -n pxc-operator + yq '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n pxc-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.d9IPwRskJe + local LAST_OUT=/tmp/tmp.Nc3bGQ90ay ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.9HIvmUnqDC + local exit_status=0 + local LAST_ERR=/tmp/tmp.zPNSynLOLd + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get deployment/percona-xtradb-cluster-operator -o yaml -n pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.d9IPwRskJe + cat /tmp/tmp.9HIvmUnqDC + rm /tmp/tmp.d9IPwRskJe /tmp/tmp.9HIvmUnqDC + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Nc3bGQ90ay deployment.apps/percona-xtradb-cluster-operator configured + cat /tmp/tmp.zPNSynLOLd + rm /tmp/tmp.Nc3bGQ90ay 
/tmp/tmp.zPNSynLOLd + return 0 + sleep 30 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.wYSreWgPWl +++ mktemp ++ local LAST_ERR=/tmp/tmp.reAVTOsC8A ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wYSreWgPWl ++ cat /tmp/tmp.reAVTOsC8A ++ rm /tmp/tmp.wYSreWgPWl /tmp/tmp.reAVTOsC8A ++ return 0 + wait_pod percona-xtradb-cluster-operator-86b4bcffd-cgkxn 480 pxc-operator + local pod=percona-xtradb-cluster-operator-86b4bcffd-cgkxn + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-86b4bcffd-cgkxn ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-86b4bcffd-cgkxn condition met waiting for pod/percona-xtradb-cluster-operator-86b4bcffd-cgkxn to become Ready.Ok + check_telemetry_transfer http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 8.0-recommended disabled + local cr_vs_uri=http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 + local cr_vs_channel=8.0-recommended + local telemetry_state=disabled + desc 'create PXC minimal cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC minimal cluster ----------------------------------------------------------------------------------- + cluster=minimal-cluster + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ztx37wDaAQ ++ mktemp + local LAST_ERR=/tmp/tmp.zZ7DB4SvOQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ztx37wDaAQ deployment.apps/pxc-client created + cat /tmp/tmp.zZ7DB4SvOQ + rm /tmp/tmp.ztx37wDaAQ /tmp/tmp.zZ7DB4SvOQ + return 0 + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + yq eval '(. 
| select(.metadata.name == "my-cluster-secrets") | .metadata.name) = "minimal-cluster"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Z6Sgf8eMqE ++ mktemp + local LAST_ERR=/tmp/tmp.EPElxYNwp1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Z6Sgf8eMqE secret/minimal-cluster unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.EPElxYNwp1 + rm /tmp/tmp.Z6Sgf8eMqE /tmp/tmp.EPElxYNwp1 + return 0 + yq eval '.spec.upgradeOptions.versionServiceEndpoint="http://version-service-cr.smart-update2-11941.svc.cluster.local:11000"' + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/cr-minimal.yaml + yq eval '.spec.upgradeOptions.apply="8.0-recommended"' + yq eval '.spec.initContainer.image="perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7"' + yq eval '.spec.haproxy.image="perconalab/percona-xtradb-cluster-operator:main-haproxy"' + yq eval '.spec.logcollector.image="perconalab/fluentbit:main-logcollector"' + yq eval '.spec.crVersion="9.9.9"' + yq eval '.spec.pxc.image="percona/percona-xtradb-cluster:8.0.41-32.1"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.j1jA3N4SX7 ++ mktemp + local LAST_ERR=/tmp/tmp.sLgnC2o9rM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.j1jA3N4SX7 perconaxtradbcluster.pxc.percona.com/minimal-cluster created + cat /tmp/tmp.sLgnC2o9rM + rm /tmp/tmp.j1jA3N4SX7 /tmp/tmp.sLgnC2o9rM + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-pxc 1 + local name=minimal-cluster-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-pxc-0 480 + local pod=minimal-cluster-pxc-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/minimal-cluster-pxc-0 condition met waiting for pod/minimal-cluster-pxc-0 to become Ready.Ok + sleep 20 + local proxy ++ get_proxy minimal-cluster ++ local target_cluster=minimal-cluster +++ kubectl_bin get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.42NYLfjH8Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.80y2iDlb82 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.42NYLfjH8Y +++ cat /tmp/tmp.80y2iDlb82 +++ rm /tmp/tmp.42NYLfjH8Y /tmp/tmp.80y2iDlb82 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo minimal-cluster-haproxy ++ return + proxy=minimal-cluster-haproxy + wait_for_running minimal-cluster-haproxy 1 + local name=minimal-cluster-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace 
----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-haproxy-0 480 + local pod=minimal-cluster-haproxy-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-haproxy-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/minimal-cluster-haproxy-0 condition met waiting for pod/minimal-cluster-haproxy-0 to become Ready.Ok + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sSOu4raepP +++ mktemp ++ local LAST_ERR=/tmp/tmp.hTfPMxw29t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sSOu4raepP ++ cat /tmp/tmp.hTfPMxw29t ++ rm /tmp/tmp.sSOu4raepP /tmp/tmp.hTfPMxw29t ++ return 0 + client_pod=pxc-client-c75dc5c46-5lc6w + wait_pod pxc-client-c75dc5c46-5lc6w + local pod=pxc-client-c75dc5c46-5lc6w + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-5lc6w ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-5lc6w condition met waiting for pod/pxc-client-c75dc5c46-5lc6w to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. 
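# Note: the mysql error above aborts the helper (the 'command terminated
# with exit code 5' that follows) because the uri string passed to
# run_mysql ends in a bare '-P' with no port value, so the client exits
# before any query is sent. Later sections of this same log pass the port
# explicitly, e.g.:
#
#   run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' \
#       '-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306'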
command terminated with exit code 5 + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XPKlHjTBbr +++ mktemp ++ local LAST_ERR=/tmp/tmp.pjtkgjh1sl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XPKlHjTBbr ++ cat /tmp/tmp.pjtkgjh1sl ++ rm /tmp/tmp.XPKlHjTBbr /tmp/tmp.pjtkgjh1sl ++ return 0 + client_pod=pxc-client-c75dc5c46-5lc6w + wait_pod pxc-client-c75dc5c46-5lc6w + local pod=pxc-client-c75dc5c46-5lc6w + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-5lc6w ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-5lc6w condition met waiting for pod/pxc-client-c75dc5c46-5lc6w to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. command terminated with exit code 5 + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + grep -E 'server request payload|unary call' + kubectl_bin logs version-service-cr-9d9559c6f-cx79k ++ mktemp + local LAST_OUT=/tmp/tmp.t5k8kKqwZV ++ mktemp + local LAST_ERR=/tmp/tmp.zqwnn6SNjp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-cr-9d9559c6f-cx79k + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.t5k8kKqwZV + cat /tmp/tmp.zqwnn6SNjp + rm /tmp/tmp.t5k8kKqwZV /tmp/tmp.zqwnn6SNjp + return 0 ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + grep -Eo '\{.*\}' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.kubeVersion)' + kubectl_bin logs version-service-5676bdcbd9-hbh8v ++ mktemp + local LAST_OUT=/tmp/tmp.JkFmQr2VD5 ++ mktemp + local LAST_ERR=/tmp/tmp.HpZj02UnR8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-5676bdcbd9-hbh8v + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JkFmQr2VD5 + cat /tmp/tmp.HpZj02UnR8 + rm /tmp/tmp.JkFmQr2VD5 /tmp/tmp.HpZj02UnR8 + return 0 + local telemetry_log_file=disabled_telemetry.version-service-cw.log.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == enabled ']' + local image_prefix=8.0 + image_prefix=8.0 + local telemetry_cr_log_file=disabled_telemetry.version-service-cr-8.0-cw.log.json + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace 
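# Note: the telemetry check above scrapes the version-service pod logs and
# strips volatile fields before diffing against the expected payloads; the
# pipeline, condensed from the trace (same selectors and jq paths):
#
#   pod=$(kubectl get pods --selector=run=version-service-cr \
#           -o 'jsonpath={.items[0].metadata.name}')
#   kubectl logs "$pod" \
#       | grep -E 'server request payload|unary call' \
#       | grep -Eo '\{.*\}' \
#       | jq 'del(."grpc.start_time")' \
#       | jq 'del(."grpc.time_ms")' \
#       | jq 'del(."grpc.request.content".msg.customResourceUid)' \
#       | jq 'del(."grpc.request.content".msg.kubeVersion)'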
----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + [[ 8.0-recommended == \8\.\0\-\r\e\c\o\m\m\e\n\d\e\d ]] + '[' disabled == disabled ']' + desc 'cr VS should have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- cr VS should have telemetry ----------------------------------------------------------------------------------- + diff /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.0-cw.log.json /dev/fd/63 ++ grep -f /tmp/tmp.2CZMOzEkaK/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.0-cw.log.json + desc 'operator VS should not have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- operator VS should not have telemetry ----------------------------------------------------------------------------------- + [[ -s /tmp/tmp.2CZMOzEkaK/disabled_telemetry.version-service.log.json ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == disabled ']' + kubectl_bin patch pxc minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.TiFt0ckt8s ++ mktemp + local LAST_ERR=/tmp/tmp.7aT9jnmDce + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TiFt0ckt8s perconaxtradbcluster.pxc.percona.com/minimal-cluster patched + cat /tmp/tmp.7aT9jnmDce + rm /tmp/tmp.TiFt0ckt8s /tmp/tmp.7aT9jnmDce + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.jgel60BvQa +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y2vZF2d6q6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jgel60BvQa ++ cat /tmp/tmp.Y2vZF2d6q6 ++ rm /tmp/tmp.jgel60BvQa /tmp/tmp.Y2vZF2d6q6 ++ return 0 + kubectl_bin delete pod -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-cgkxn ++ mktemp + local LAST_OUT=/tmp/tmp.vwOc7GrBDE ++ mktemp + local LAST_ERR=/tmp/tmp.bhez9NvPqi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-cgkxn + 
exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vwOc7GrBDE pod "percona-xtradb-cluster-operator-86b4bcffd-cgkxn" deleted from pxc-operator namespace + cat /tmp/tmp.bhez9NvPqi + rm /tmp/tmp.vwOc7GrBDE /tmp/tmp.bhez9NvPqi + return 0 + kubectl_bin delete pxc --all ++ mktemp + local LAST_OUT=/tmp/tmp.l5At2s5d3l ++ mktemp + local LAST_ERR=/tmp/tmp.VRthwO3LKe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.l5At2s5d3l perconaxtradbcluster.pxc.percona.com "minimal-cluster" deleted from smart-update2-11941 namespace + cat /tmp/tmp.VRthwO3LKe + rm /tmp/tmp.l5At2s5d3l /tmp/tmp.VRthwO3LKe + return 0 + kubectl_bin delete deploy pxc-client ++ mktemp + local LAST_OUT=/tmp/tmp.k2fOVUgTWF ++ mktemp + local LAST_ERR=/tmp/tmp.Nz05jWqdMc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy pxc-client + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.k2fOVUgTWF deployment.apps "pxc-client" deleted from smart-update2-11941 namespace + cat /tmp/tmp.Nz05jWqdMc + rm /tmp/tmp.k2fOVUgTWF /tmp/tmp.Nz05jWqdMc + return 0 + sleep 30 + kubectl_bin delete pod -l run=version-service-cr ++ mktemp + local LAST_OUT=/tmp/tmp.7dhGddfDih ++ mktemp + local LAST_ERR=/tmp/tmp.LORoKtBA2e + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service-cr + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7dhGddfDih pod "version-service-cr-9d9559c6f-cx79k" deleted from smart-update2-11941 namespace + cat /tmp/tmp.LORoKtBA2e + rm /tmp/tmp.7dhGddfDih /tmp/tmp.LORoKtBA2e + return 0 + kubectl_bin delete pod -l run=version-service ++ mktemp + local LAST_OUT=/tmp/tmp.G0gLL2Svl3 ++ mktemp + local LAST_ERR=/tmp/tmp.aXpolU0Bob + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.G0gLL2Svl3 pod "version-service-5676bdcbd9-hbh8v" deleted from smart-update2-11941 namespace + cat /tmp/tmp.aXpolU0Bob + rm /tmp/tmp.G0gLL2Svl3 /tmp/tmp.aXpolU0Bob + return 0 + check_telemetry_transfer http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 disabled disabled + local cr_vs_uri=http://version-service-cr.smart-update2-11941.svc.cluster.local:11000 + local cr_vs_channel=disabled + local telemetry_state=disabled + desc 'create PXC minimal cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC minimal cluster ----------------------------------------------------------------------------------- + cluster=minimal-cluster + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.lcQZ3kJilx ++ mktemp + local LAST_ERR=/tmp/tmp.nbc4qyBhkw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lcQZ3kJilx deployment.apps/pxc-client created + cat /tmp/tmp.nbc4qyBhkw + rm /tmp/tmp.lcQZ3kJilx /tmp/tmp.nbc4qyBhkw + return 0 + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - + yq eval '(. 
| select(.metadata.name == "my-cluster-secrets") | .metadata.name) = "minimal-cluster"' ++ mktemp + local LAST_OUT=/tmp/tmp.ZeG8ffzezz ++ mktemp + local LAST_ERR=/tmp/tmp.09cOkc84p3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZeG8ffzezz secret/minimal-cluster unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.09cOkc84p3 + rm /tmp/tmp.ZeG8ffzezz /tmp/tmp.09cOkc84p3 + return 0 + yq eval '.spec.upgradeOptions.versionServiceEndpoint="http://version-service-cr.smart-update2-11941.svc.cluster.local:11000"' + yq eval '.spec.upgradeOptions.apply="disabled"' + kubectl_bin apply -f - + yq eval '.spec.initContainer.image="perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7"' + yq eval '.spec.crVersion="9.9.9"' + yq eval '.spec.logcollector.image="perconalab/fluentbit:main-logcollector"' + yq eval '.spec.haproxy.image="perconalab/percona-xtradb-cluster-operator:main-haproxy"' + yq eval '.spec.pxc.image="percona/percona-xtradb-cluster:8.0.41-32.1"' ++ mktemp + yq /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/deploy/cr-minimal.yaml + local LAST_OUT=/tmp/tmp.kXCGf6dg4Z ++ mktemp + local LAST_ERR=/tmp/tmp.at62J93gbQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kXCGf6dg4Z perconaxtradbcluster.pxc.percona.com/minimal-cluster created + cat /tmp/tmp.at62J93gbQ + rm /tmp/tmp.kXCGf6dg4Z /tmp/tmp.at62J93gbQ + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-pxc 1 + local name=minimal-cluster-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-pxc-0 480 + local pod=minimal-cluster-pxc-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/minimal-cluster-pxc-0 condition met waiting for pod/minimal-cluster-pxc-0 to become Ready.Ok + sleep 20 + local proxy ++ get_proxy minimal-cluster ++ local target_cluster=minimal-cluster +++ kubectl_bin get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BcmNj3Vmyy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iQNcZUXqUk +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc minimal-cluster -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BcmNj3Vmyy +++ cat /tmp/tmp.iQNcZUXqUk +++ rm /tmp/tmp.BcmNj3Vmyy /tmp/tmp.iQNcZUXqUk +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo minimal-cluster-haproxy ++ return + proxy=minimal-cluster-haproxy + wait_for_running minimal-cluster-haproxy 1 + local name=minimal-cluster-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for 
running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod minimal-cluster-haproxy-0 480 + local pod=minimal-cluster-haproxy-0 + local max_retry=480 + local ns= ++ echo minimal-cluster-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/minimal-cluster-haproxy-0 condition met waiting for pod/minimal-cluster-haproxy-0 to become Ready.Ok + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Or3eb6HD7X +++ mktemp ++ local LAST_ERR=/tmp/tmp.NK51ADxeBc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Or3eb6HD7X ++ cat /tmp/tmp.NK51ADxeBc ++ rm /tmp/tmp.Or3eb6HD7X /tmp/tmp.NK51ADxeBc ++ return 0 + client_pod=pxc-client-c75dc5c46-5hsvd + wait_pod pxc-client-c75dc5c46-5hsvd + local pod=pxc-client-c75dc5c46-5hsvd + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-5hsvd ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-5hsvd condition met waiting for pod/pxc-client-c75dc5c46-5hsvd to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. command terminated with exit code 5 + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h minimal-cluster-haproxy -uroot -proot_password -P' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h minimal-cluster-haproxy -uroot -proot_password -P' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LTD9pWk53n +++ mktemp ++ local LAST_ERR=/tmp/tmp.wwalfQrzUe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LTD9pWk53n ++ cat /tmp/tmp.wwalfQrzUe ++ rm /tmp/tmp.LTD9pWk53n /tmp/tmp.wwalfQrzUe ++ return 0 + client_pod=pxc-client-c75dc5c46-5hsvd + wait_pod pxc-client-c75dc5c46-5hsvd + local pod=pxc-client-c75dc5c46-5hsvd + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-5hsvd ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-5hsvd condition met waiting for pod/pxc-client-c75dc5c46-5hsvd to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace [ERROR] mysql: option '-P' requires an argument. 
command terminated with exit code 5 + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.start_time")' + grep -E 'server request payload|unary call' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -Eo '\{.*\}' + kubectl_bin logs version-service-cr-9d9559c6f-99zb5 ++ mktemp + local LAST_OUT=/tmp/tmp.S5TvK3l0d0 ++ mktemp + local LAST_ERR=/tmp/tmp.Fy0F8ncmuV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-cr-9d9559c6f-99zb5 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.S5TvK3l0d0 + cat /tmp/tmp.Fy0F8ncmuV + rm /tmp/tmp.S5TvK3l0d0 /tmp/tmp.Fy0F8ncmuV + return 0 + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.customResourceUid)' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + grep -Eo '\{.*\}' + kubectl_bin logs version-service-5676bdcbd9-nkrr4 ++ mktemp + local LAST_OUT=/tmp/tmp.JABxyzyJDT ++ mktemp + local LAST_ERR=/tmp/tmp.OohBoC7eWo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-5676bdcbd9-nkrr4 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JABxyzyJDT + cat /tmp/tmp.OohBoC7eWo + rm /tmp/tmp.JABxyzyJDT /tmp/tmp.OohBoC7eWo + return 0 + local telemetry_log_file=disabled_telemetry.version-service-cw.log.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == enabled ']' + local image_prefix=disabled + image_prefix=disabled + local telemetry_cr_log_file=disabled_telemetry.version-service-cr-disabled-cw.log.json + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + [[ disabled == \d\i\s\a\b\l\e\d\-\r\e\c\o\m\m\e\n\d\e\d ]] + [[ disabled == \d\i\s\a\b\l\e\d\-\l\a\t\e\s\t ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == disabled ']' + desc 'CR VS should not have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- CR VS should not have telemetry ----------------------------------------------------------------------------------- + [[ -s /tmp/tmp.2CZMOzEkaK/disabled_telemetry.version-service-cr.log.json ]] + desc 'operator VS should not have telemetry' + set +o xtrace ----------------------------------------------------------------------------------- operator VS should not have telemetry ----------------------------------------------------------------------------------- + [[ -s /tmp/tmp.2CZMOzEkaK/disabled_telemetry.version-service.log.json ]] + kubectl_bin patch pxc 
minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.puzOHb9PVZ ++ mktemp + local LAST_ERR=/tmp/tmp.P5JTCehJc5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-pxc-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.puzOHb9PVZ perconaxtradbcluster.pxc.percona.com/minimal-cluster patched + cat /tmp/tmp.P5JTCehJc5 + rm /tmp/tmp.puzOHb9PVZ /tmp/tmp.P5JTCehJc5 + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ head -1 +++ mktemp ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ local LAST_OUT=/tmp/tmp.U6AVZSjsDL +++ mktemp ++ local LAST_ERR=/tmp/tmp.hcFpINTdrX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U6AVZSjsDL ++ cat /tmp/tmp.hcFpINTdrX ++ rm /tmp/tmp.U6AVZSjsDL /tmp/tmp.hcFpINTdrX ++ return 0 + kubectl_bin delete pod -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-9dfcg ++ mktemp + local LAST_OUT=/tmp/tmp.7hcLjkmUzY ++ mktemp + local LAST_ERR=/tmp/tmp.wIA5zMxBn1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-9dfcg + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7hcLjkmUzY pod "percona-xtradb-cluster-operator-86b4bcffd-9dfcg" deleted from pxc-operator namespace + cat /tmp/tmp.wIA5zMxBn1 + rm /tmp/tmp.7hcLjkmUzY /tmp/tmp.wIA5zMxBn1 + return 0 + kubectl_bin delete pxc --all ++ mktemp + local LAST_OUT=/tmp/tmp.fd7DjxGOTI ++ mktemp + local LAST_ERR=/tmp/tmp.YI2ABqjX3O + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fd7DjxGOTI perconaxtradbcluster.pxc.percona.com "minimal-cluster" deleted from smart-update2-11941 namespace + cat /tmp/tmp.YI2ABqjX3O + rm /tmp/tmp.fd7DjxGOTI /tmp/tmp.YI2ABqjX3O + return 0 + kubectl_bin delete deploy pxc-client ++ mktemp + local LAST_OUT=/tmp/tmp.WvIFS3vR2L ++ mktemp + local LAST_ERR=/tmp/tmp.FVnDfWwWgB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy pxc-client + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WvIFS3vR2L deployment.apps "pxc-client" deleted from smart-update2-11941 namespace + cat /tmp/tmp.FVnDfWwWgB + rm /tmp/tmp.WvIFS3vR2L /tmp/tmp.FVnDfWwWgB + return 0 + sleep 30 + kubectl_bin delete deployment version-service-cr ++ mktemp + local LAST_OUT=/tmp/tmp.BV0GKOD1h3 ++ mktemp + local LAST_ERR=/tmp/tmp.y52bMYBlKV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deployment version-service-cr + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BV0GKOD1h3 deployment.apps "version-service-cr" deleted from smart-update2-11941 namespace + cat /tmp/tmp.y52bMYBlKV + rm 
/tmp/tmp.BV0GKOD1h3 /tmp/tmp.y52bMYBlKV + return 0 + desc 'Telemetry testing finished' + set +o xtrace ----------------------------------------------------------------------------------- Telemetry testing finished ----------------------------------------------------------------------------------- + desc 'PXC cluster with version service offline' + set +o xtrace ----------------------------------------------------------------------------------- PXC cluster with version service offline ----------------------------------------------------------------------------------- + cp -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + yq -i eval '.spec.initContainer.image = "perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7"' /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + spinup_pxc smart-update /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local cluster=smart-update + local config=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ZTV3UmJccf ++ mktemp + local LAST_ERR=/tmp/tmp.3DpyGaDKGJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZTV3UmJccf secret/my-cluster-secrets created secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.3DpyGaDKGJ + rm /tmp/tmp.ZTV3UmJccf /tmp/tmp.3DpyGaDKGJ + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + kubectl_bin apply -f - + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + local LAST_OUT=/tmp/tmp.jOpRD3PSoK + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ + /usr/bin/sed -e 
's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + local LAST_ERR=/tmp/tmp.mer4gagVUn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jOpRD3PSoK deployment.apps/pxc-client created + cat /tmp/tmp.mer4gagVUn + rm /tmp/tmp.jOpRD3PSoK /tmp/tmp.mer4gagVUn + return 0 + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + apply_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local config_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml '' + local input_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + cat /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + local LAST_OUT=/tmp/tmp.Ryx9c3QByr + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.ulgtYwebas + local exit_status=0 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ryx9c3QByr perconaxtradbcluster.pxc.percona.com/smart-update created + cat /tmp/tmp.ulgtYwebas + rm /tmp/tmp.Ryx9c3QByr /tmp/tmp.ulgtYwebas + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy smart-update ++ local target_cluster=smart-update +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hPlcCwWdul ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ryiPYasJQi +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.hPlcCwWdul +++ cat /tmp/tmp.ryiPYasJQi +++ rm /tmp/tmp.hPlcCwWdul /tmp/tmp.ryiPYasJQi +++ return 0 ++ [[ true == 
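# Note: apply_config/cat_config above is just a sed rewrite chain over the
# config file, piped into kubectl; condensed to a few of the substitutions
# shown in this trace:
#
#   cat "$input_file" \
#       | sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' \
#       | sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' \
#       | sed -e 's#apply:.*#apply: Never#' \
#       | kubectl apply -f -
#
# (The init, haproxy, proxysql, logcollector, backup and pmm images are
# rewritten by the same pattern.)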
\t\r\u\e ]] ++ echo smart-update-haproxy ++ return + local proxy=smart-update-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.YjLZgTYBYO ++ mktemp + local LAST_ERR=/tmp/tmp.ex5hIePPYS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.YjLZgTYBYO + cat /tmp/tmp.ex5hIePPYS error: no matching resources found + rm /tmp/tmp.YjLZgTYBYO /tmp/tmp.ex5hIePPYS + return 1 + true + wait_for_running smart-update-haproxy 1 + local name=smart-update-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-haproxy-0 480 + local pod=smart-update-haproxy-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo smart-update-haproxy-0 ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace Error from server (NotFound): pods "smart-update-haproxy-0" not found waiting for pod/smart-update-haproxy-0 to become Ready..................................Ok + wait_for_running smart-update-pxc 3 + local name=smart-update-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-0 480 + local pod=smart-update-pxc-0 + local max_retry=480 + local ns= ++ echo smart-update-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-0 condition met waiting for pod/smart-update-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-1 480 + local pod=smart-update-pxc-1 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo smart-update-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/smart-update-pxc-1 condition met waiting for pod/smart-update-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-2 480 + local pod=smart-update-pxc-2 + local max_retry=480 + local ns= ++ echo smart-update-pxc-2 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 
's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/smart-update-pxc-2 condition met waiting for pod/smart-update-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc smart-update -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ base64 --decode ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9nSuTVEfXw +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1FZZkjUFQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9nSuTVEfXw ++ cat /tmp/tmp.g1FZZkjUFQ ++ rm /tmp/tmp.9nSuTVEfXw /tmp/tmp.g1FZZkjUFQ ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.320fHG3y5d +++ mktemp ++ local LAST_ERR=/tmp/tmp.HohSZtvTbY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.320fHG3y5d ++ cat /tmp/tmp.HohSZtvTbY ++ rm /tmp/tmp.320fHG3y5d /tmp/tmp.HohSZtvTbY ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ck3fViC2Re +++ mktemp ++ local LAST_ERR=/tmp/tmp.fXMA8H3bzL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ck3fViC2Re ++ cat /tmp/tmp.fXMA8H3bzL ++ rm /tmp/tmp.ck3fViC2Re /tmp/tmp.fXMA8H3bzL ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo 
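# Note: getSecretData above resolves the root password by reading one key
# from the cluster secret and base64-decoding it; in short (same template
# syntax as the trace):
#
#   secret=$(kubectl get pxc smart-update -o 'jsonpath={.spec.secretsName}')
#   root_pass=$(kubectl get "secrets/$secret" '--template={{.data.root}}' | base64 --decode)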
pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZqlXISr3He +++ mktemp ++ local LAST_ERR=/tmp/tmp.m7T61E5FIk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZqlXISr3He ++ cat /tmp/tmp.m7T61E5FIk ++ rm /tmp/tmp.ZqlXISr3He /tmp/tmp.m7T61E5FIk ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XA8gLKuOFV +++ mktemp ++ local LAST_ERR=/tmp/tmp.WVLiln1d5n ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XA8gLKuOFV ++ cat /tmp/tmp.WVLiln1d5n ++ rm /tmp/tmp.XA8gLKuOFV /tmp/tmp.WVLiln1d5n ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
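# Note: each compare_mysql_cmd above boils down to: run the SELECT against
# one pod, write the rows to a temp file, and diff against a stored
# expectation. Sketch (the redirect into select-1.sql is implied by the
# trace, which only shows the file being size-checked, grepped for DNS
# errors, and diffed):
#
#   out=/tmp/tmp.2CZMOzEkaK/select-1.sql
#   run_mysql 'SELECT * from myApp.myApp;' "$uri" > "$out"
#   [[ -s $out ]]                                    # result must be non-empty
#   ! grep -q 'Unknown MySQL server host' "$out"     # and not a DNS failure
#   diff -u "$expected_result" "$out"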
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5W8bD7mBIj +++ mktemp ++ local LAST_ERR=/tmp/tmp.UO1mCwHSbj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5W8bD7mBIj ++ cat /tmp/tmp.UO1mCwHSbj ++ rm /tmp/tmp.5W8bD7mBIj /tmp/tmp.UO1mCwHSbj ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + is_keyring_plugin_in_use smart-update + local cluster=smart-update + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + grep -E -o 'early-plugin-load=keyring_\w+.so' + kubectl exec -it smart-update-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' Unable to use a TTY - input is not a terminal or the right kind of file + return 1 + wait_cluster_consistency smart-update 3 2 + local cluster_name=smart-update + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/smart-update to be ready' waiting for pxc/smart-update to be ready++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nwblKM00D6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aqgRNZegLp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nwblKM00D6 ++ cat /tmp/tmp.aqgRNZegLp ++ rm /tmp/tmp.nwblKM00D6 /tmp/tmp.aqgRNZegLp ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cWl6qrKffU +++ mktemp ++ local LAST_ERR=/tmp/tmp.V5KXdIvAMj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cWl6qrKffU ++ cat /tmp/tmp.V5KXdIvAMj ++ rm /tmp/tmp.cWl6qrKffU /tmp/tmp.V5KXdIvAMj ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine smart-update +++ local cluster_name=smart-update ++++ get_proxy smart-update ++++ local target_cluster=smart-update +++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.r0NdSOofF5 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.9DQWm7gkjQ +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.r0NdSOofF5 +++++ cat /tmp/tmp.9DQWm7gkjQ +++++ rm /tmp/tmp.r0NdSOofF5 /tmp/tmp.9DQWm7gkjQ +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo smart-update-haproxy ++++ return +++ local cluster_proxy=smart-update-haproxy +++ echo haproxy ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0B4u0MVslh +++ mktemp ++ local LAST_ERR=/tmp/tmp.g6yVqJuSHY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0B4u0MVslh ++ cat /tmp/tmp.g6yVqJuSHY ++ rm /tmp/tmp.0B4u0MVslh /tmp/tmp.g6yVqJuSHY ++ return 0 + [[ 2 == \2 ]] + echo ++ kubectl_bin get pxc/smart-update -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xT9vwQblbK +++ mktemp ++ local 
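# Note: the 'Unable to use a TTY' warning above is expected, since kubectl
# exec is invoked with -it from a non-interactive CI shell (dropping -t
# silences it), and the keyring check then returns 1 because no
# early-plugin-load line matches. wait_cluster_consistency polls the CR
# status until everything is ready; condensed (same jsonpath fields, sleep
# interval, and sizes as the trace; the max=300 retry bound is elided):
#
#   until [[ "$(kubectl get pxc smart-update -o 'jsonpath={.status.state}')" == "ready" ]] \
#       && [[ "$(kubectl get pxc smart-update -o 'jsonpath={.status.pxc.ready}')" == "3" ]] \
#       && [[ "$(kubectl get pxc smart-update -o 'jsonpath={.status.haproxy.ready}')" == "2" ]]; do
#       sleep 7
#   done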
LAST_ERR=/tmp/tmp.TImHFQeRtb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc/smart-update -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xT9vwQblbK ++ cat /tmp/tmp.TImHFQeRtb ++ rm /tmp/tmp.xT9vwQblbK /tmp/tmp.TImHFQeRtb ++ return 0 + [[ percona/percona-xtradb-cluster:8.0.41-32.1 != \p\e\r\c\o\n\a\/\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\:\8\.\0\.\4\1\-\3\2\.\1 ]] + desc 'PXC cluster update with recommended image by version service' + set +o xtrace ----------------------------------------------------------------------------------- PXC cluster update with recommended image by version service ----------------------------------------------------------------------------------- + vs_image=recommended ++ run_mysql 'SELECT @@hostname hostname;' '-h smart-update-haproxy -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h smart-update-haproxy -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mFlIJtE006 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CQl0rd9K3H +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.mFlIJtE006 +++ cat /tmp/tmp.CQl0rd9K3H +++ rm /tmp/tmp.mFlIJtE006 /tmp/tmp.CQl0rd9K3H +++ return 0 ++ client_pod=pxc-client-698c5fbfc7-rkjs4 ++ wait_pod pxc-client-698c5fbfc7-rkjs4 ++ local pod=pxc-client-698c5fbfc7-rkjs4 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-698c5fbfc7-rkjs4 +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ grep -E '^(pxc|proxysql)$' ++ local container= ++ set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + initial_primary=smart-update-pxc-0 + kubectl_bin patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"recommended","schedule": "* * * * *"}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.jvD4qdqAxP ++ mktemp + local LAST_ERR=/tmp/tmp.AqTW7lQ36W + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"recommended","schedule": "* * * * *"}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jvD4qdqAxP perconaxtradbcluster.pxc.percona.com/smart-update patched + cat /tmp/tmp.AqTW7lQ36W + rm /tmp/tmp.jvD4qdqAxP /tmp/tmp.AqTW7lQ36W + return 0 + sleep 55 + check_last_pod_to_update smart-update smart-update-pxc-0 3 perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + local cluster=smart-update + local initial_primary=smart-update-pxc-0 + local pxc_size=3 + local target_image=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + set +x Waiting for the last pod to update+ wait_cluster_consistency smart-update 3 2 + local cluster_name=smart-update + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency 
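# Note: the merge patch above is what arms the smart update: it points
# upgradeOptions at the reachable version service, selects the
# 'recommended' channel, and schedules the version check every minute.
# Condensed:
#
#   kubectl patch pxc/smart-update --type=merge -p '{
#       "spec": {"upgradeOptions": {
#           "versionServiceEndpoint": "http://version-service:11000",
#           "apply": "recommended",
#           "schedule": "* * * * *"}}}'
#
# check_last_pod_to_update then asserts that the current primary
# (smart-update-pxc-0, captured just before the patch) is restarted last
# during the rollout, per the check's name.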
----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/smart-update to be ready' waiting for pxc/smart-update to be ready++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wbp1N5tiNu +++ mktemp ++ local LAST_ERR=/tmp/tmp.pau8NnSK06 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Wbp1N5tiNu ++ cat /tmp/tmp.pau8NnSK06 ++ rm /tmp/tmp.Wbp1N5tiNu /tmp/tmp.pau8NnSK06 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FTULYfzN0l +++ mktemp ++ local LAST_ERR=/tmp/tmp.spkR2UjzsY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FTULYfzN0l ++ cat /tmp/tmp.spkR2UjzsY ++ rm /tmp/tmp.FTULYfzN0l /tmp/tmp.spkR2UjzsY ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine smart-update +++ local cluster_name=smart-update ++++ get_proxy smart-update ++++ local target_cluster=smart-update +++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.51lARm6q0r ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.KN8HDwg7HS +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.51lARm6q0r +++++ cat /tmp/tmp.KN8HDwg7HS +++++ rm /tmp/tmp.51lARm6q0r /tmp/tmp.KN8HDwg7HS +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo smart-update-haproxy ++++ return +++ local cluster_proxy=smart-update-haproxy +++ echo haproxy ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vGUwKMzuaX +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZLaQmVs74U ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vGUwKMzuaX ++ cat /tmp/tmp.ZLaQmVs74U ++ rm /tmp/tmp.vGUwKMzuaX /tmp/tmp.ZLaQmVs74U ++ return 0 + [[ 2 == \2 ]] + echo ++ seq 0 2 + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kNZqeUStqs +++ mktemp ++ local LAST_ERR=/tmp/tmp.cEdwQnSCyF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kNZqeUStqs ++ cat /tmp/tmp.cEdwQnSCyF ++ rm /tmp/tmp.kNZqeUStqs /tmp/tmp.cEdwQnSCyF ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gaYU3WBSX4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.p3Wcnc4j1g ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gaYU3WBSX4 ++ cat /tmp/tmp.p3Wcnc4j1g ++ rm /tmp/tmp.gaYU3WBSX4 /tmp/tmp.p3Wcnc4j1g ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7KGfHVtECj +++ mktemp ++ local LAST_ERR=/tmp/tmp.9hlwTTeSkR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7KGfHVtECj ++ cat /tmp/tmp.9hlwTTeSkR ++ rm /tmp/tmp.7KGfHVtECj /tmp/tmp.9hlwTTeSkR ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
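
Note: the compare loop running here is the suite's data-integrity check — for each ordinal it queries the pod directly through its headless-service DNS name (smart-update-pxc-N.smart-update-pxc) and diffs the result against a golden file, preferring a version-specific variant (select-1-80.sql) when one exists. A sketch of one iteration, with workspace paths shortened and run_mysql standing in for the suite's helper that execs mysql inside the pxc-client pod:

  for i in 0 1 2; do
    host="smart-update-pxc-${i}.smart-update-pxc"
    run_mysql 'SELECT * from myApp.myApp;' "-h ${host} -uroot -proot_password" \
      > /tmp/select-1.sql
    # Same sanity checks as in the trace: non-empty output, and DNS resolved.
    [[ -s /tmp/select-1.sql ]] || exit 1
    grep -q 'Unknown MySQL server host' /tmp/select-1.sql && exit 1
    diff -u compare/select-1.sql /tmp/select-1.sql
  done
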
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + kubectl_bin delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml ++ mktemp + local LAST_OUT=/tmp/tmp.tQUdplZqjX ++ mktemp + local LAST_ERR=/tmp/tmp.WC3Qpq3bHn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tQUdplZqjX perconaxtradbcluster.pxc.percona.com "smart-update" deleted from smart-update2-11941 namespace + cat /tmp/tmp.WC3Qpq3bHn + rm /tmp/tmp.tQUdplZqjX /tmp/tmp.WC3Qpq3bHn + return 0 + kubectl_bin delete pvc --all ++ mktemp + local LAST_OUT=/tmp/tmp.R3uRk9VGqR ++ mktemp + local LAST_ERR=/tmp/tmp.yNeyFlc10T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pvc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.R3uRk9VGqR persistentvolumeclaim "datadir-smart-update-pxc-0" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-1" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-2" deleted from smart-update2-11941 namespace + cat /tmp/tmp.yNeyFlc10T + rm /tmp/tmp.R3uRk9VGqR /tmp/tmp.yNeyFlc10T + return 0 + desc 'PXC cluster update with the latest image by version service' + set +o xtrace ----------------------------------------------------------------------------------- PXC cluster update with the latest image by version service ----------------------------------------------------------------------------------- + spinup_pxc smart-update /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local cluster=smart-update + local config=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6689xigYax ++ mktemp + local LAST_ERR=/tmp/tmp.F2hEgUTnDZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6689xigYax secret/my-cluster-secrets unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.F2hEgUTnDZ + rm /tmp/tmp.6689xigYax /tmp/tmp.F2hEgUTnDZ + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml '' + kubectl_bin apply -f - + local 
input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + local LAST_OUT=/tmp/tmp.92AVt68ipq + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + local LAST_ERR=/tmp/tmp.TJ7hYFqiXH + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.92AVt68ipq deployment.apps/pxc-client unchanged + cat /tmp/tmp.TJ7hYFqiXH + rm /tmp/tmp.92AVt68ipq /tmp/tmp.TJ7hYFqiXH + return 0 + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + apply_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local config_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml '' + local input_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + cat /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml ++ mktemp + local LAST_OUT=/tmp/tmp.o2O3OSL5QW + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' ++ mktemp + local LAST_ERR=/tmp/tmp.5OttJtYEOo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.o2O3OSL5QW 
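
Note: cat_config, whose stages interleave above, is a single shell pipeline — the manifest is cat'ed through a chain of sed processes, which is why their `+` trace lines appear out of order. The substitutions pin every image to the build under test, force the test apiVersion, and set apply: Never so the freshly created cluster does not upgrade before the test explicitly patches it. The shape of the pipeline, trimmed to the decisive substitutions (all patterns verbatim from the trace):

  cat "$input_file" \
    | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' \
    | /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' \
    | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' \
    | /usr/bin/sed -e 's#apply:.*#apply: Never#' \
    | kubectl apply -f -
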
perconaxtradbcluster.pxc.percona.com/smart-update created + cat /tmp/tmp.5OttJtYEOo + rm /tmp/tmp.o2O3OSL5QW /tmp/tmp.5OttJtYEOo + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy smart-update ++ local target_cluster=smart-update +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CBMtKnRkxI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lxowMaKQHa +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.CBMtKnRkxI +++ cat /tmp/tmp.lxowMaKQHa +++ rm /tmp/tmp.CBMtKnRkxI /tmp/tmp.lxowMaKQHa +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo smart-update-haproxy ++ return + local proxy=smart-update-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.LJwaW5CtWp ++ mktemp + local LAST_ERR=/tmp/tmp.emqW5RypXc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.LJwaW5CtWp + cat /tmp/tmp.emqW5RypXc error: no matching resources found + rm /tmp/tmp.LJwaW5CtWp /tmp/tmp.emqW5RypXc + return 1 + true + wait_for_running smart-update-haproxy 1 + local name=smart-update-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-haproxy-0 480 + local pod=smart-update-haproxy-0 + local max_retry=480 + local ns= ++ echo smart-update-haproxy-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/smart-update-haproxy-0 condition met waiting for pod/smart-update-haproxy-0 to become Ready.Ok + wait_for_running smart-update-pxc 3 + local name=smart-update-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-0 480 + 
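
Note: the three failed kubectl wait attempts just above are expected — the selector targets app.kubernetes.io/instance=monitoring and no PMM pods exist in this scenario, hence "error: no matching resources found". The `+ true` following `return 1` shows the caller deliberately swallows the failure, i.e. this wait is best-effort, roughly:

  kubectl wait --for=condition=Ready pod \
    -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator \
    --timeout=300s -n smart-update2-11941 || true   # tolerated: monitoring may be absent
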
local pod=smart-update-pxc-0 + local max_retry=480 + local ns= ++ echo smart-update-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-0 condition met waiting for pod/smart-update-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-1 480 + local pod=smart-update-pxc-1 + local max_retry=480 + local ns= ++ echo smart-update-pxc-1 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/smart-update-pxc-1 condition met waiting for pod/smart-update-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-2 480 + local pod=smart-update-pxc-2 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo smart-update-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/smart-update-pxc-2 condition met waiting for pod/smart-update-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc smart-update -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.PRIKzsuyYk +++ mktemp ++ local LAST_ERR=/tmp/tmp.3flBmDDyRH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PRIKzsuyYk ++ cat /tmp/tmp.3flBmDDyRH ++ rm /tmp/tmp.PRIKzsuyYk /tmp/tmp.3flBmDDyRH ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nP2f5Vjha4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sW7yyortYd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nP2f5Vjha4 ++ cat /tmp/tmp.sW7yyortYd ++ rm /tmp/tmp.nP2f5Vjha4 /tmp/tmp.sW7yyortYd ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h smart-update-haproxy -uroot 
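
Note: the root credential is never hard-coded — the test reads spec.secretsName from the CR and then decodes the root key of that Secret. The getSecretData helper traced above reduces to a single line:

  # getSecretData my-cluster-secrets root, inlined:
  root_pass=$(kubectl get secrets/my-cluster-secrets --template='{{.data.root}}' | base64 --decode)
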
-p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bFtUCD1Os3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WStjlwYypt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bFtUCD1Os3 ++ cat /tmp/tmp.WStjlwYypt ++ rm /tmp/tmp.bFtUCD1Os3 /tmp/tmp.WStjlwYypt ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dehTk04PLB +++ mktemp ++ local LAST_ERR=/tmp/tmp.otP5m0LmPP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dehTk04PLB ++ cat /tmp/tmp.otP5m0LmPP ++ rm /tmp/tmp.dehTk04PLB /tmp/tmp.otP5m0LmPP ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
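
Note: every SQL statement in this log runs the same way — resolve the single pod of the pxc-client deployment by label, wait for it, then exec mysql inside it (the "Defaulted container" lines appear because the pod also carries a backup container and no -c flag is passed). A hypothetical inlined equivalent of run_mysql; the exec form and mysql flags are assumptions, only the selector, container names, and connection URI come from the trace:

  client_pod=$(kubectl get pods --selector=name=pxc-client \
    -o jsonpath='{.items[].metadata.name}')
  kubectl exec "$client_pod" -c pxc-client -- \
    mysql -h smart-update-haproxy -uroot -p'root_password' -P3306 \
    -e 'INSERT myApp.myApp (id) VALUES (100500)'
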
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kJh5PCNOnu +++ mktemp ++ local LAST_ERR=/tmp/tmp.4GlckJXo7m ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kJh5PCNOnu ++ cat /tmp/tmp.4GlckJXo7m ++ rm /tmp/tmp.kJh5PCNOnu /tmp/tmp.4GlckJXo7m ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mkq4ygDUpv +++ mktemp ++ local LAST_ERR=/tmp/tmp.7VjhQLrt1l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Mkq4ygDUpv ++ cat /tmp/tmp.7VjhQLrt1l ++ rm /tmp/tmp.Mkq4ygDUpv /tmp/tmp.7VjhQLrt1l ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + is_keyring_plugin_in_use smart-update + local cluster=smart-update + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + grep -E -o 'early-plugin-load=keyring_\w+.so' + kubectl exec -it smart-update-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' Unable to use a TTY - input is not a terminal or the right kind of file + return 1 + vs_image=latest ++ run_mysql 'SELECT @@hostname hostname;' '-h smart-update-haproxy -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h smart-update-haproxy -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uyVVzh5Mkv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YiACav5kJ6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uyVVzh5Mkv +++ cat /tmp/tmp.YiACav5kJ6 +++ rm /tmp/tmp.uyVVzh5Mkv /tmp/tmp.YiACav5kJ6 +++ return 0 ++ client_pod=pxc-client-698c5fbfc7-rkjs4 ++ wait_pod pxc-client-698c5fbfc7-rkjs4 ++ local pod=pxc-client-698c5fbfc7-rkjs4 ++ local max_retry=480 ++ local ns= +++ grep -E '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ echo pxc-client-698c5fbfc7-rkjs4 ++ local container= ++ set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + initial_primary=smart-update-pxc-0 + kubectl_bin patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"latest","schedule": "* * * * *"}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.nAhMW9Jy0v ++ mktemp + local LAST_ERR=/tmp/tmp.JmDV156NAF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"latest","schedule": "* * * * *"}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nAhMW9Jy0v perconaxtradbcluster.pxc.percona.com/smart-update patched + cat /tmp/tmp.JmDV156NAF + rm /tmp/tmp.nAhMW9Jy0v /tmp/tmp.JmDV156NAF + return 0 + sleep 55 + check_last_pod_to_update smart-update smart-update-pxc-0 3 perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + local cluster=smart-update + local initial_primary=smart-update-pxc-0 + local pxc_size=3 + local target_image=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + set +x Waiting for the last pod to update+ wait_cluster_consistency smart-update 3 2 + local cluster_name=smart-update + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/smart-update to be ready' waiting for pxc/smart-update to be ready++ kubectl_bin get pxc smart-update -o 
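
Note: two things worth reading past in the block above. First, "Unable to use a TTY" is only a warning — is_keyring_plugin_in_use runs kubectl exec with -it under a non-interactive Jenkins shell, but the command still executes, and the empty grep for early-plugin-load=keyring_*.so (the `return 1`) simply means no keyring plugin is configured in node.cnf, which is the expected result here. Dropping the TTY request would silence the warning, as in this sketch (assumption: -it is not otherwise needed):

  kubectl exec smart-update-pxc-0 -c pxc -- cat /etc/mysql/node.cnf \
    | grep -E -o 'early-plugin-load=keyring_\w+.so'

Second, the test then re-arms the updater exactly as before but with apply: "latest" — the second of the three upgrade strategies this suite exercises.
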
'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mqc0DW5giO +++ mktemp ++ local LAST_ERR=/tmp/tmp.yO2MmxD3H1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mqc0DW5giO ++ cat /tmp/tmp.yO2MmxD3H1 ++ rm /tmp/tmp.mqc0DW5giO /tmp/tmp.yO2MmxD3H1 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NTadcpZF39 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gzbylvuPTL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NTadcpZF39 ++ cat /tmp/tmp.gzbylvuPTL ++ rm /tmp/tmp.NTadcpZF39 /tmp/tmp.gzbylvuPTL ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine smart-update +++ local cluster_name=smart-update ++++ get_proxy smart-update ++++ local target_cluster=smart-update +++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.PWkEC7zsqO ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ToH7DQIbUc +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.PWkEC7zsqO +++++ cat /tmp/tmp.ToH7DQIbUc +++++ rm /tmp/tmp.PWkEC7zsqO /tmp/tmp.ToH7DQIbUc +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo smart-update-haproxy ++++ return +++ local cluster_proxy=smart-update-haproxy +++ echo haproxy ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4TIkJc0aNc +++ mktemp ++ local LAST_ERR=/tmp/tmp.QcsKrlYNzw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4TIkJc0aNc ++ cat /tmp/tmp.QcsKrlYNzw ++ rm /tmp/tmp.4TIkJc0aNc /tmp/tmp.QcsKrlYNzw ++ return 0 + [[ 2 == \2 ]] + echo ++ seq 0 2 + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.juGuBZO3xf +++ mktemp ++ local LAST_ERR=/tmp/tmp.I1fIYiS3iS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.juGuBZO3xf ++ cat /tmp/tmp.I1fIYiS3iS ++ rm /tmp/tmp.juGuBZO3xf /tmp/tmp.I1fIYiS3iS ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QztxhrdWuI +++ mktemp ++ local LAST_ERR=/tmp/tmp.WQW5IP3EP8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QztxhrdWuI ++ cat /tmp/tmp.WQW5IP3EP8 ++ rm /tmp/tmp.QztxhrdWuI /tmp/tmp.WQW5IP3EP8 ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rq9OE2R6qV +++ mktemp ++ local LAST_ERR=/tmp/tmp.kGlt5Tzyji ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Rq9OE2R6qV ++ cat /tmp/tmp.kGlt5Tzyji ++ rm /tmp/tmp.Rq9OE2R6qV /tmp/tmp.kGlt5Tzyji ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-698c5fbfc7-rkjs4 + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + kubectl_bin delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fbndgOGDfu ++ mktemp + local LAST_ERR=/tmp/tmp.EgXL887v6M + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fbndgOGDfu perconaxtradbcluster.pxc.percona.com "smart-update" deleted from smart-update2-11941 namespace + cat /tmp/tmp.EgXL887v6M + rm /tmp/tmp.fbndgOGDfu /tmp/tmp.EgXL887v6M + return 0 + kubectl_bin delete pvc --all ++ mktemp + local LAST_OUT=/tmp/tmp.EBLKkwRjGE ++ mktemp + local LAST_ERR=/tmp/tmp.aXSUUOd2zN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pvc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EBLKkwRjGE persistentvolumeclaim "datadir-smart-update-pxc-0" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-1" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-2" deleted from smart-update2-11941 namespace + cat /tmp/tmp.aXSUUOd2zN + rm /tmp/tmp.EBLKkwRjGE /tmp/tmp.aXSUUOd2zN + return 0 + desc 'PXC cluster update with explicitly specified image inside version service' + set +o xtrace ----------------------------------------------------------------------------------- PXC cluster update with explicitly specified image inside version service ----------------------------------------------------------------------------------- + spinup_pxc smart-update /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local cluster=smart-update + local config=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Pf9jU28hOa ++ mktemp + local LAST_ERR=/tmp/tmp.CvgyOpcZLw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Pf9jU28hOa secret/my-cluster-secrets unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.CvgyOpcZLw + rm /tmp/tmp.Pf9jU28hOa /tmp/tmp.CvgyOpcZLw + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml '' + local 
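
Note: between scenarios the teardown above deletes the PerconaXtraDBCluster CR first and then all PVCs explicitly — the trace shows datadir-smart-update-pxc-{0,1,2} still present after the CR is gone, so removing them forces the next spinup_pxc to bootstrap from empty volumes rather than reuse old data. The per-scenario reset is just:

  kubectl delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml
  kubectl delete pvc --all   # datadir-smart-update-pxc-0..2 in this namespace
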
input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ + local LAST_OUT=/tmp/tmp.8ViUolSye7 + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.FcGNKQ48jp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8ViUolSye7 deployment.apps/pxc-client unchanged + cat /tmp/tmp.FcGNKQ48jp + rm /tmp/tmp.8ViUolSye7 /tmp/tmp.FcGNKQ48jp + return 0 + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + apply_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local config_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + '[' -z '' ']' + cat_config /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml '' + local input_file=/tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + local pvc_name= + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2349-b5e2b8a7#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' + cat /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.41-32.1#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update2-11941~ ++ mktemp + local LAST_OUT=/tmp/tmp.WLJcDV31c4 ++ mktemp + local LAST_ERR=/tmp/tmp.iahsu7yDbj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat 
/tmp/tmp.WLJcDV31c4 perconaxtradbcluster.pxc.percona.com/smart-update created + cat /tmp/tmp.iahsu7yDbj + rm /tmp/tmp.WLJcDV31c4 /tmp/tmp.iahsu7yDbj + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy smart-update ++ local target_cluster=smart-update +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WOYeAJ5DRI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8oZjLsW31B +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.WOYeAJ5DRI +++ cat /tmp/tmp.8oZjLsW31B +++ rm /tmp/tmp.WOYeAJ5DRI /tmp/tmp.8oZjLsW31B +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo smart-update-haproxy ++ return + local proxy=smart-update-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 ++ mktemp + local LAST_OUT=/tmp/tmp.dycwMPN34V ++ mktemp + local LAST_ERR=/tmp/tmp.jwXViy8d4H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update2-11941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.dycwMPN34V + cat /tmp/tmp.jwXViy8d4H error: no matching resources found + rm /tmp/tmp.dycwMPN34V /tmp/tmp.jwXViy8d4H + return 1 + true + wait_for_running smart-update-haproxy 1 + local name=smart-update-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-haproxy-0 480 + local pod=smart-update-haproxy-0 + local max_retry=480 + local ns= ++ echo smart-update-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/smart-update-haproxy-0 condition met waiting for pod/smart-update-haproxy-0 to become Ready.Ok + wait_for_running smart-update-pxc 3 + local name=smart-update-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod 
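
Note: wait_pod derives which container's readiness to report from the pod name alone — the sed/grep pair preceding every `local container=` line rewrites names matching -(pxc|proxysql)-<ordinal> to the component name and discards everything else:

  container=$(echo "$pod" \
    | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' \
    | grep -E '^(pxc|proxysql)$')
  # smart-update-pxc-1      -> pxc
  # smart-update-haproxy-0  -> "" (grep filters it out; kubectl then falls back
  #                            to the pod's first container, hence the
  #                            "Defaulted container" messages on the client pod)
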
smart-update-pxc-0 480 + local pod=smart-update-pxc-0 + local max_retry=480 + local ns= ++ echo smart-update-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-0 condition met waiting for pod/smart-update-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-1 480 + local pod=smart-update-pxc-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo smart-update-pxc-1 + local container=pxc + set +o xtrace pod/smart-update-pxc-1 condition met waiting for pod/smart-update-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-2 480 + local pod=smart-update-pxc-2 + local max_retry=480 + local ns= ++ echo smart-update-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-2 condition met waiting for pod/smart-update-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc smart-update -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.SjZaXB42uW +++ mktemp ++ local LAST_ERR=/tmp/tmp.aE09f237P8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SjZaXB42uW ++ cat /tmp/tmp.aE09f237P8 ++ rm /tmp/tmp.SjZaXB42uW /tmp/tmp.aE09f237P8 ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZsmmBF9RO9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.olKvQ3rlCm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZsmmBF9RO9 ++ cat /tmp/tmp.olKvQ3rlCm ++ rm /tmp/tmp.ZsmmBF9RO9 /tmp/tmp.olKvQ3rlCm ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h 
smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h smart-update-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ks4ITdizXP +++ mktemp ++ local LAST_ERR=/tmp/tmp.02LlpnVnei ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ks4ITdizXP ++ cat /tmp/tmp.02LlpnVnei ++ rm /tmp/tmp.ks4ITdizXP /tmp/tmp.02LlpnVnei ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4czZOfBNw7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ix1euh1dN7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4czZOfBNw7 ++ cat /tmp/tmp.Ix1euh1dN7 ++ rm /tmp/tmp.4czZOfBNw7 /tmp/tmp.Ix1euh1dN7 ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2q3mDTkWyJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kXSCi5ebwy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2q3mDTkWyJ ++ cat /tmp/tmp.kXSCi5ebwy ++ rm /tmp/tmp.2q3mDTkWyJ /tmp/tmp.kXSCi5ebwy ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yp5SSOKKth +++ mktemp ++ local LAST_ERR=/tmp/tmp.TyDxERhN2m ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Yp5SSOKKth ++ cat /tmp/tmp.TyDxERhN2m ++ rm /tmp/tmp.Yp5SSOKKth /tmp/tmp.TyDxERhN2m ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + is_keyring_plugin_in_use smart-update + local cluster=smart-update + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + grep -E -o 'early-plugin-load=keyring_\w+.so' + kubectl exec -it smart-update-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' Unable to use a TTY - input is not a terminal or the right kind of file + return 1 ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ sort -V ++ head -n1 ++ tail -n2 +++ get_operator_pod +++ local label_prefix=app.kubernetes.io/ ++++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++++ grep -c percona-xtradb-cluster-operator ++ grep :8.0 +++ local check_label=1 +++ [[ 1 -eq 0 ]] +++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TdLmEiuCwn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1w6K2hsJ2O +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' +++ head -1 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TdLmEiuCwn +++ cat /tmp/tmp.1w6K2hsJ2O +++ rm /tmp/tmp.TdLmEiuCwn /tmp/tmp.1w6K2hsJ2O +++ return 0 ++ kubectl_bin exec -ti percona-xtradb-cluster-operator-86b4bcffd-7jh8x -n pxc-operator -- curl -s http://version-service.smart-update2-11941.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 +++ mktemp ++ local LAST_OUT=/tmp/tmp.z2sGaiCCsC +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ideh1hBJ52 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -ti percona-xtradb-cluster-operator-86b4bcffd-7jh8x -n pxc-operator -- curl -s http://version-service.smart-update2-11941.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.z2sGaiCCsC ++ cat /tmp/tmp.Ideh1hBJ52 Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.z2sGaiCCsC /tmp/tmp.Ideh1hBJ52 ++ return 0 + vs_image=percona/percona-xtradb-cluster:8.0.41-32.1 ++ run_mysql 'SELECT @@hostname hostname;' '-h smart-update-haproxy -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h smart-update-haproxy -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sv08vZoBNw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vJ6BxMKenE +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.sv08vZoBNw +++ cat /tmp/tmp.vJ6BxMKenE +++ rm /tmp/tmp.sv08vZoBNw /tmp/tmp.vJ6BxMKenE +++ return 0 ++ client_pod=pxc-client-698c5fbfc7-rkjs4 ++ wait_pod pxc-client-698c5fbfc7-rkjs4 ++ local pod=pxc-client-698c5fbfc7-rkjs4 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-698c5fbfc7-rkjs4 +++ grep -E '^(pxc|proxysql)$' +++ 
/usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + initial_primary=smart-update-pxc-0 + kubectl_bin patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"percona/percona-xtradb-cluster:8.0.41-32.1","schedule": "* * * * *"}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.qJBqZcXE23 ++ mktemp + local LAST_ERR=/tmp/tmp.KaeTfWo1d3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc/smart-update --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"http://version-service:11000","apply":"percona/percona-xtradb-cluster:8.0.41-32.1","schedule": "* * * * *"}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qJBqZcXE23 perconaxtradbcluster.pxc.percona.com/smart-update patched + cat /tmp/tmp.KaeTfWo1d3 + rm /tmp/tmp.qJBqZcXE23 /tmp/tmp.KaeTfWo1d3 + return 0 + sleep 55 + check_last_pod_to_update smart-update smart-update-pxc-0 3 percona/percona-xtradb-cluster:percona/percona-xtradb-cluster:8.0.41-32.1 + local cluster=smart-update + local initial_primary=smart-update-pxc-0 + local pxc_size=3 + local target_image=percona/percona-xtradb-cluster:percona/percona-xtradb-cluster:8.0.41-32.1 + set +x Waiting for the last pod to update+ wait_cluster_consistency smart-update 3 2 + local cluster_name=smart-update + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/smart-update to be ready' waiting for pxc/smart-update to be ready++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lXx9gywxEt +++ mktemp ++ local LAST_ERR=/tmp/tmp.xJhva36wD9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lXx9gywxEt ++ cat /tmp/tmp.xJhva36wD9 ++ rm /tmp/tmp.lXx9gywxEt /tmp/tmp.xJhva36wD9 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Et3VeOZQEU +++ mktemp ++ local LAST_ERR=/tmp/tmp.255hUh5brK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Et3VeOZQEU ++ cat /tmp/tmp.255hUh5brK ++ rm /tmp/tmp.Et3VeOZQEU /tmp/tmp.255hUh5brK ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine smart-update +++ local cluster_name=smart-update ++++ get_proxy smart-update ++++ local target_cluster=smart-update +++++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.dvlV6QZDyd ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.t6JHMP23gj +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat 
/tmp/tmp.dvlV6QZDyd +++++ cat /tmp/tmp.t6JHMP23gj +++++ rm /tmp/tmp.dvlV6QZDyd /tmp/tmp.t6JHMP23gj +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo smart-update-haproxy ++++ return +++ local cluster_proxy=smart-update-haproxy +++ echo haproxy ++ kubectl_bin get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.efPirL2dRT +++ mktemp ++ local LAST_ERR=/tmp/tmp.4DWkQppVQs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc smart-update -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.efPirL2dRT ++ cat /tmp/tmp.4DWkQppVQs ++ rm /tmp/tmp.efPirL2dRT /tmp/tmp.4DWkQppVQs ++ return 0 + [[ 2 == \2 ]] + echo ++ seq 0 2 + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-0.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Ryh3WLq6s +++ mktemp ++ local LAST_ERR=/tmp/tmp.kRsSLBkT2w ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9Ryh3WLq6s ++ cat /tmp/tmp.kRsSLBkT2w ++ rm /tmp/tmp.9Ryh3WLq6s /tmp/tmp.kRsSLBkT2w ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-698c5fbfc7-rkjs4 + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-1.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nGJTdHgXKQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.iTqeYkPRLc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nGJTdHgXKQ ++ cat /tmp/tmp.iTqeYkPRLc ++ rm /tmp/tmp.nGJTdHgXKQ /tmp/tmp.iTqeYkPRLc ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + for i in '$(seq 0 $((CLUSTER_SIZE - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.41-32.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h smart-update-pxc-2.smart-update-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tkDRieUmar +++ mktemp ++ local LAST_ERR=/tmp/tmp.m4l0mRLSeq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tkDRieUmar ++ cat /tmp/tmp.m4l0mRLSeq ++ rm /tmp/tmp.tkDRieUmar /tmp/tmp.m4l0mRLSeq ++ return 0 + client_pod=pxc-client-698c5fbfc7-rkjs4 + wait_pod pxc-client-698c5fbfc7-rkjs4 + local pod=pxc-client-698c5fbfc7-rkjs4 + local max_retry=480 + local ns= ++ echo pxc-client-698c5fbfc7-rkjs4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-698c5fbfc7-rkjs4 condition met waiting for pod/pxc-client-698c5fbfc7-rkjs4 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.2CZMOzEkaK/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.2CZMOzEkaK/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/compare/select-1.sql /tmp/tmp.2CZMOzEkaK/select-1.sql + kubectl_bin delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml ++ mktemp + local LAST_OUT=/tmp/tmp.D37THnrIol ++ mktemp + local LAST_ERR=/tmp/tmp.7wLKrzebQB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /tmp/tmp.2CZMOzEkaK/smart-update-version-service-unreachable.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D37THnrIol perconaxtradbcluster.pxc.percona.com "smart-update" deleted from smart-update2-11941 namespace + cat /tmp/tmp.7wLKrzebQB + rm /tmp/tmp.D37THnrIol /tmp/tmp.7wLKrzebQB + return 0 + kubectl_bin delete pvc --all ++ mktemp + local LAST_OUT=/tmp/tmp.T7UHy7889S ++ mktemp + local LAST_ERR=/tmp/tmp.HJzPwTaQ0Z + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pvc --all + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.T7UHy7889S persistentvolumeclaim "datadir-smart-update-pxc-0" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-1" deleted from smart-update2-11941 namespace persistentvolumeclaim "datadir-smart-update-pxc-2" deleted from smart-update2-11941 namespace + cat /tmp/tmp.HJzPwTaQ0Z + rm /tmp/tmp.T7UHy7889S /tmp/tmp.HJzPwTaQ0Z + return 0 + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6MPk9DWfor ++ mktemp + local LAST_ERR=/tmp/tmp.Js5BEQkp1U + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2349/e2e-tests/smart-update2/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6MPk9DWfor deployment.apps "version-service" deleted from smart-update2-11941 namespace service "version-service" deleted from smart-update2-11941 namespace + cat /tmp/tmp.Js5BEQkp1U + rm /tmp/tmp.6MPk9DWfor /tmp/tmp.Js5BEQkp1U + return 0 + destroy smart-update2-11941 + local namespace=smart-update2-11941 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v level=info + grep -v 'the object has been modified' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + tee /tmp/tmp.2CZMOzEkaK/operator.log + grep -v 'get backup status: Job.batch' +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator + sort -u ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ head -1 ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator 
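
-----------------------------------------------------------------------------------
note: kubectl_bin retry wrapper (sketch reconstructed from this trace)
-----------------------------------------------------------------------------------
Every kubectl_bin call in this log, including the one traced immediately below,
follows the same pattern: two mktemp files for stdout/stderr, up to three
attempts (seq 0 2) with set +e around the raw kubectl call, break on success,
then cat and rm of the temp files. A minimal sketch, assuming the helper lives
in the e2e-tests functions library; the real implementation may differ in
backoff and logging details:

    kubectl_bin() {
        local i exit_status=0
        local LAST_OUT LAST_ERR
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 1    # assumption: brief pause before retrying; not visible in this trace
            else
                break      # matches the traced '[' 0 '!=' 0 ']' + break on success
            fi
        done
        cat "$LAST_OUT"          # replay captured stdout
        cat "$LAST_ERR" >&2      # replay captured stderr (redirections are not shown by xtrace)
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
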
+++ mktemp ++ local LAST_OUT=/tmp/tmp.A4dCnlwMAH +++ mktemp ++ local LAST_ERR=/tmp/tmp.CPEV7b2XSt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.A4dCnlwMAH ++ cat /tmp/tmp.CPEV7b2XSt ++ rm /tmp/tmp.A4dCnlwMAH /tmp/tmp.CPEV7b2XSt ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-7jh8x ++ mktemp + local LAST_OUT=/tmp/tmp.G41BeK94H8 ++ mktemp + local LAST_ERR=/tmp/tmp.zhzn1nywER + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-86b4bcffd-7jh8x + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.G41BeK94H8 + cat /tmp/tmp.zhzn1nywER + rm /tmp/tmp.G41BeK94H8 /tmp/tmp.zhzn1nywER + return 0 2026/01/22 04:17:07 http: TLS handshake error from 10.15.106.2:40878: EOF 2026-01-22T04:16:47.945Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2026-01-22T04:16:47.945Z INFO setup Manager starting up {"gitCommit": "b5e2b8a7faedff3e7f7ee1efcdba55b4de934082", "gitBranch": "PR-2349-b5e2b8a7", "buildTime": "2026-01-22T01:12:37Z", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} 2026-01-22T04:16:47.945Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.14-gke.1214000"} 2026-01-22T04:16:47.948Z INFO setup Registering Components. 2026-01-22T04:16:48.428Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2026-01-22T04:16:48.428Z INFO controller-runtime.metrics Starting metrics server 2026-01-22T04:16:48.428Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2026-01-22T04:16:48.428Z INFO controller-runtime.webhook Starting webhook server 2026-01-22T04:16:48.428Z INFO setup Starting the Cmd. 2026-01-22T04:16:48.428Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2026-01-22T04:16:48.429Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2026-01-22T04:16:48.429Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2026-01-22T04:16:48.429Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2026-01-22T04:16:48.629Z INFO Attempting to acquire leader lease... 
{"lock": "pxc-operator/08db1feb.percona.com"} 2026-01-22T04:17:07.087Z DEBUG events percona-xtradb-cluster-operator-86b4bcffd-7jh8x_b58ee2c8-7c4d-42e2-88a0-4874df4fb3a6 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"4544c05c-40f4-487e-a171-e307f47e34d5","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1769055427079519009"}, "reason": "LeaderElection"} 2026-01-22T04:17:07.087Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2026-01-22T04:17:07.087Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2026-01-22T04:17:07.087Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2026-01-22T04:17:07.087Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2026-01-22T04:17:07.087Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2026-01-22T04:17:07.187Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2026-01-22T04:17:07.188Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2026-01-22T04:17:07.188Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2026-01-22T04:17:07.188Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2026-01-22T04:17:07.189Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2026-01-22T04:17:07.189Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2026-01-22T04:17:08.363Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"minimal-cluster","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "minimal-cluster", "reconcileID": "c92c3da1-d764-4f14-b781-db766758ad16", "error": "failed to update cr finalizers: Internal error occurred: failed calling webhook \"validationwebhook.pxc.percona.com\": failed to call webhook: Post \"https://percona-xtradb-cluster-operator.pxc-operator.svc:443/validate-percona-xtradbcluster?timeout=10s\": tls: failed to verify certificate: x509: certificate signed by unknown authority (possibly because of \"crypto/rsa: verification error\" while trying to verify candidate authority certificate \"Root CA\")", "errorVerbose": "Internal error occurred: failed calling webhook \"validationwebhook.pxc.percona.com\": failed to call webhook: Post 
\"https://percona-xtradb-cluster-operator.pxc-operator.svc:443/validate-percona-xtradbcluster?timeout=10s\": tls: failed to verify certificate: x509: certificate signed by unknown authority (possibly because of \"crypto/rsa: verification error\" while trying to verify candidate authority certificate \"Root CA\")\nfailed to update cr finalizers\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:308\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2026-01-22T04:17:12.990Z ERROR Update status {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"minimal-cluster","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "minimal-cluster", "reconcileID": "470b6583-e292-44c7-9e44-2bf3e25a6203", "error": "PerconaXtraDBCluster.pxc.percona.com \"minimal-cluster\" not found"} 2026-01-22T04:17:48.042Z INFO spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0"} 2026-01-22T04:17:51.257Z INFO spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0"} 2026-01-22T04:17:51.324Z INFO spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. 
{"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0"} 2026-01-22T04:17:54.425Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:17:54.443Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:17:55.002Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "818272e3-e91e-4d15-b09a-ebbea914a6a0", "error": "failed to deploy haproxy: updatePod for haproxy: reconcile config: reconcile autotune config: create or update configmap: configmaps \"auto-smart-update-pxc\" already exists", "errorVerbose": "configmaps \"auto-smart-update-pxc\" already exists\ncreate or update 
configmap\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileAutotuneConfigMap\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:92\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:25\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile autotune 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:27\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:50\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nupdatePod for haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:566\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to deploy 
haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:579\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2026-01-22T04:17:55.121Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-01-22T04:17:55.162Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-01-22T04:17:55.218Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:17:55.333Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:17:55.479Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:17:55.723Z DEBUG Creating object {"controller": "pxc-controller", 
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "3b646160-667a-4604-9d1c-00ba68cafd72", "object": "smart-update-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:17:56.705Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "9970bad6-a054-4738-b934-cc0821ae25e0", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:17:56.725Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "9970bad6-a054-4738-b934-cc0821ae25e0", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:19:08.464Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f", "user": "operator"} 2026-01-22T04:19:08.507Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f", "user": "monitor"} 2026-01-22T04:19:08.611Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f"} 2026-01-22T04:19:08.658Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f"} 2026-01-22T04:19:08.701Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f", "user": "xtrabackup"} 2026-01-22T04:19:08.758Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", 
"reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f"} 2026-01-22T04:19:08.805Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f", "user": "replication"} 2026-01-22T04:19:10.812Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4005f858-91ef-4bf8-a9f2-e5078fcf793f", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.230.228:3306: connect: connection refused"} 2026-01-22T04:21:38.355Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "87dc144b-0fb7-487a-bb60-e37568a4fafd", "user": "root"} 2026-01-22T04:21:39.461Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "87dc144b-0fb7-487a-bb60-e37568a4fafd", "new version": "8.0.41-32.1"} 2026-01-22T04:23:24.289Z INFO add new job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4d3c4bcb-494c-48b8-956a-24436fe0ac64", "name": "ensure-version/smart-update2-11941/smart-update", "schedule": "* * * * *"} 2026-01-22T04:24:24.001Z DEBUG Use version service endpoint {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4d3c4bcb-494c-48b8-956a-24436fe0ac64", "endpoint": "http://version-service.smart-update2-11941.svc.cluster.local:11000"} 2026-01-22T04:24:24.038Z ERROR failed to ensure version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4d3c4bcb-494c-48b8-956a-24436fe0ac64", "error": "failed to get new versions: failed to check version: Get \"http://version-service:11000/versions/v1/pxc-operator/9.9.9/recommended?clusterWideEnabled=true&customResourceUid=b8c2e865-3847-4a59-b123-c1a04b6b842b&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host", "errorVerbose": "Get 
\"http://version-service:11000/versions/v1/pxc-operator/9.9.9/recommended?clusterWideEnabled=true&customResourceUid=b8c2e865-3847-4a59-b123-c1a04b6b842b&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host\nfailed to check version\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).getNewVersions\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:283\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:300\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to get new versions\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:302\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2026-01-22T04:25:29.989Z ERROR Update status {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "abed6388-878a-4106-90b4-56dbc278b1b4", "error": "PerconaXtraDBCluster.pxc.percona.com \"smart-update\" not found"} 2026-01-22T04:25:29.989Z INFO cluster is not found, deleting the job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "4d3c4bcb-494c-48b8-956a-24436fe0ac64", "name": "ensure-version/smart-update2-11941/smart-update", "cluster": "smart-update", "namespace": "smart-update2-11941"} 2026-01-22T04:25:47.516Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "692bfaf3-4d9b-451b-a5e1-6148c2da3c49", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:25:47.538Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": 
"PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "692bfaf3-4d9b-451b-a5e1-6148c2da3c49", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:25:48.130Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "692bfaf3-4d9b-451b-a5e1-6148c2da3c49", "error": "failed to deploy haproxy: updatePod for haproxy: reconcile config: reconcile autotune config: create or update configmap: configmaps \"auto-smart-update-pxc\" already exists", "errorVerbose": "configmaps \"auto-smart-update-pxc\" already exists\ncreate or update configmap\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileAutotuneConfigMap\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:92\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:25\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile autotune 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:27\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:50\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nupdatePod for haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:566\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to deploy 
haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:579\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"}
2026-01-22T04:25:48.250Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"}
2026-01-22T04:25:48.286Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"}
2026-01-22T04:25:48.332Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2026-01-22T04:25:48.393Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2026-01-22T04:25:48.457Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
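The Reconciler error above ("configmaps \"auto-smart-update-pxc\" already exists") follows two back-to-back Creating object entries for the same ConfigMap inside one reconcile (reconcileID 692bfaf3…, 04:25:47.516 and 04:25:47.538): plausibly the operator's cached client had not yet observed the first create, so the second create-or-update (the HAProxy updatePod path also reconciles config) took the create branch and hit AlreadyExists. The error is transient; the requeued reconcile at 04:25:48.250 proceeds to create the StatefulSets and Services. From the CLI, the equivalent idempotent pattern is create with --dry-run piped into apply; the literal key and value below are placeholders for illustration, not what the operator writes:

# hypothetical illustration of an idempotent create-or-update
kubectl -n smart-update2-11941 create configmap auto-smart-update-pxc \
  --from-literal=auto-config.cnf=placeholder --dry-run=client -o yaml \
  | kubectl apply -f -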
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "97c78186-afed-4da4-9974-1c94c4d37251", "object": "smart-update-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:25:49.146Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "cfab124f-4d2c-4435-a9b3-43bef725a022", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:25:49.191Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "cfab124f-4d2c-4435-a9b3-43bef725a022", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:29:29.445Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "459d393e-2669-44cc-a503-bef35f369159", "new version": "8.0.41-32.1"} 2026-01-22T04:30:52.462Z INFO add new job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "2069355a-6cbe-4db7-8372-8caec971b819", "name": "ensure-version/smart-update2-11941/smart-update", "schedule": "* * * * *"} 2026-01-22T04:31:52.221Z DEBUG Use version service endpoint {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "2069355a-6cbe-4db7-8372-8caec971b819", "endpoint": "http://version-service.smart-update2-11941.svc.cluster.local:11000"} 2026-01-22T04:31:52.278Z ERROR failed to ensure version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "2069355a-6cbe-4db7-8372-8caec971b819", "error": "failed to get new versions: failed to check version: Get \"http://version-service:11000/versions/v1/pxc-operator/9.9.9/latest?clusterWideEnabled=true&customResourceUid=dcfe6ebb-db99-4c39-a0c8-6d5648671dbc&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host", "errorVerbose": "Get 
\"http://version-service:11000/versions/v1/pxc-operator/9.9.9/latest?clusterWideEnabled=true&customResourceUid=dcfe6ebb-db99-4c39-a0c8-6d5648671dbc&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host\nfailed to check version\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).getNewVersions\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:283\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:300\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to get new versions\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:302\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2026-01-22T04:32:58.060Z ERROR Update status {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "0a0ebaf9-9770-4453-b076-310464609a15", "error": "PerconaXtraDBCluster.pxc.percona.com \"smart-update\" not found"} 2026-01-22T04:32:58.060Z INFO cluster is not found, deleting the job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "2069355a-6cbe-4db7-8372-8caec971b819", "name": "ensure-version/smart-update2-11941/smart-update", "cluster": "smart-update", "namespace": "smart-update2-11941"} 2026-01-22T04:33:14.435Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "d43968bb-8a45-4b6c-8358-cb8dfe434ccd", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:33:14.453Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": 
"PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "d43968bb-8a45-4b6c-8358-cb8dfe434ccd", "object": "auto-smart-update-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-01-22T04:33:15.025Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "d43968bb-8a45-4b6c-8358-cb8dfe434ccd", "error": "failed to deploy haproxy: updatePod for haproxy: reconcile config: reconcile autotune config: create or update configmap: configmaps \"auto-smart-update-pxc\" already exists", "errorVerbose": "configmaps \"auto-smart-update-pxc\" already exists\ncreate or update configmap\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileAutotuneConfigMap\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:92\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:25\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile autotune 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).reconcileConfigMaps\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/config.go:27\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:48\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile 
config\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).updatePod\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/upgrade.go:50\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:565\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nupdatePod for haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:566\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:578\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to deploy 
haproxy\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).deploy\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:579\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:367\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"}
2026-01-22T04:33:15.133Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"}
2026-01-22T04:33:15.171Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"}
2026-01-22T04:33:15.224Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2026-01-22T04:33:15.278Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2026-01-22T04:33:15.394Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "8e550a42-7980-4cce-9f44-959e367e4930", "object": "smart-update-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-01-22T04:33:16.608Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "96ebd2f3-b0ef-4e66-99e8-422d66287b8b", "object": "smart-update-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:33:16.639Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "96ebd2f3-b0ef-4e66-99e8-422d66287b8b", "object": "smart-update-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-01-22T04:37:03.084Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "0d7802c5-ee27-4a7a-a0fa-812dd1cf52d3", "new version": "8.0.41-32.1"} 2026-01-22T04:38:23.875Z INFO add new job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "60da2fa4-9661-4014-9a12-2bea8430d09b", "name": "ensure-version/smart-update2-11941/smart-update", "schedule": "* * * * *"} 2026-01-22T04:39:23.001Z DEBUG Use version service endpoint {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "60da2fa4-9661-4014-9a12-2bea8430d09b", "endpoint": "http://version-service.smart-update2-11941.svc.cluster.local:11000"} 2026-01-22T04:39:23.074Z ERROR failed to ensure version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "60da2fa4-9661-4014-9a12-2bea8430d09b", "error": "failed to get new versions: failed to check version: Get \"http://version-service:11000/versions/v1/pxc-operator/9.9.9/percona%2Fpercona-xtradb-cluster:8.0.41-32.1?clusterWideEnabled=true&customResourceUid=1c44c599-cb98-4a7f-94bc-d61ae480da95&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host", "errorVerbose": "Get 
\"http://version-service:11000/versions/v1/pxc-operator/9.9.9/percona%2Fpercona-xtradb-cluster:8.0.41-32.1?clusterWideEnabled=true&customResourceUid=1c44c599-cb98-4a7f-94bc-d61ae480da95&databaseVersion=8.0.41-32.1&kubeVersion=v1.31.14-gke.1214000&platform=kubernetes&userManagementEnabled=false\": dial tcp: lookup version-service on 34.118.224.10:53: no such host\nfailed to check version\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).getNewVersions\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:283\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:300\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nfailed to get new versions\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).ensurePXCVersion\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:302\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:212\ngithub.com/robfig/cron/v3.FuncJob.Run\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136\ngithub.com/robfig/cron/v3.(*Cron).startJob.func1\n\t/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2026-01-22T04:40:26.363Z ERROR Update status {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "181289d3-39d8-450c-8d0a-3a625d4c61cb", "error": "PerconaXtraDBCluster.pxc.percona.com \"smart-update\" not found"} 2026-01-22T04:40:26.363Z INFO cluster is not found, deleting the job {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"smart-update","namespace":"smart-update2-11941"}, "namespace": "smart-update2-11941", "name": "smart-update", "reconcileID": "60da2fa4-9661-4014-9a12-2bea8430d09b", "name": "ensure-version/smart-update2-11941/smart-update", "cluster": "smart-update", "namespace": "smart-update2-11941"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile.func1 github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).scheduleEnsurePXCVersion.func1 github.com/robfig/cron/v3.(*Cron).startJob.func1 github.com/robfig/cron/v3.FuncJob.Run /go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:136 
/go/pkg/mod/github.com/robfig/cron/v3@v3.0.1/cron.go:312
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:222
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:313
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:438
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:479
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.0/pkg/internal/controller/controller.go:495
/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:259
/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:491
/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/version.go:214
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.GrzvS5eN1v
++ mktemp
+ local LAST_ERR=/tmp/tmp.uQgzJcl1T1
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.GrzvS5eN1v
No resources found
+ cat /tmp/tmp.uQgzJcl1T1
+ rm /tmp/tmp.GrzvS5eN1v /tmp/tmp.uQgzJcl1T1
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.ALivZNgmcF
++ mktemp
+ local LAST_ERR=/tmp/tmp.3DRiUI34s5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ALivZNgmcF
No resources found
+ cat /tmp/tmp.3DRiUI34s5
+ rm /tmp/tmp.ALivZNgmcF /tmp/tmp.3DRiUI34s5
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.6yzsgyqjvv
++ mktemp
+ local LAST_ERR=/tmp/tmp.TV7gqnuWZB
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6yzsgyqjvv
No resources found
+ cat /tmp/tmp.TV7gqnuWZB
+ rm /tmp/tmp.6yzsgyqjvv /tmp/tmp.TV7gqnuWZB
+ return 0
+ kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
++ mktemp
+ local LAST_OUT=/tmp/tmp.wXLfqSrXnV
++ mktemp
+ local LAST_ERR=/tmp/tmp.mZbfdOAwFl
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.wXLfqSrXnV
validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted
+ cat /tmp/tmp.mZbfdOAwFl
+ rm /tmp/tmp.wXLfqSrXnV /tmp/tmp.mZbfdOAwFl
+ return 0
+ kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml
namespace "cert-manager" deleted
+ :
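The kubectl patch pxc -n sh invocation above is an artifact of the cleanup pipeline: with no PXC resources left, xargs -L 1 still runs sh -xc once with no trailing arguments, so $0 inside the script falls back to the shell's own name (sh) and $1 is empty; the harness absorbs the resulting error with ':'. A more defensive variant (a sketch, not the harness's actual code) iterates machine-readable output and skips the empty case; on GNU xargs, the -r (no-run-if-empty) flag would achieve the same:

# hypothetical alternative to the xargs pipeline above
kubectl get pxc --all-namespaces \
  -o jsonpath='{range .items[*]}{.metadata.namespace} {.metadata.name}{"\n"}{end}' |
  while read -r ns name; do
    [ -n "$ns" ] && kubectl patch pxc -n "$ns" "$name" \
      --type=merge -p '{"metadata":{"finalizers":[]}}'
  done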
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ rm -rf /tmp/tmp.2CZMOzEkaK
+ kubectl_bin delete --grace-period=0 --force=true namespace smart-update2-11941
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
++ mktemp
+ desc 'test passed'
+ set +o xtrace
++ mktemp
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.rX5Om9DguS
++ mktemp
+ local LAST_ERR=/tmp/tmp.06d5YkJVqL
+ local exit_status=0
++ seq 0 2
+ local LAST_OUT=/tmp/tmp.s55PUwwluF
+ for i in '$(seq 0 2)'
+ set +e
++ mktemp
+ kubectl delete --grace-period=0 --force=true namespace smart-update2-11941
+ local LAST_ERR=/tmp/tmp.pFIByhBD2e
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pxc-operator