Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/logs/proxy-protocol-8-0.log Warning: version difference between client (1.36) and server (1.33) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.33) exceeds the supported minor version skew of +/-1 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + prepare_config + desc 'prepare config' + set +o xtrace ----------------------------------------------------------------------------------- prepare config ----------------------------------------------------------------------------------- ++ kubectl get nodes ++ grep -v NAME ++ awk '{print $1}' ++ grep -v master + nodes='gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk' ++ printf 'gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk\n' ++ wc -l + '[' 3 -lt 3 ']' ++ echo 'gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk' ++ tail -n1 + haproxy_node=gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk ++ echo 'gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk' ++ head -n1 ++ tail -n2 + pxc_node=gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 ++ echo 'gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4 gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk' ++ tail -n3 ++ head -n1 + client_node=gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/conf/proxy-protocol.yml + /usr/bin/sed -e 's#kubernetes.io/hostname:.*-node1$#kubernetes.io/hostname: gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-w1xk#' + /usr/bin/sed -e 's#kubernetes.io/hostname:.*-node2$#kubernetes.io/hostname: gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-9qj4#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/conf/client.yml + /usr/bin/sed -e 's#kubernetes.io/hostname:.*-node3$#kubernetes.io/hostname: gke-jen-pxc-2467-3dc7f02-default-pool-110d245d-6gk2#' + create_infra proxy-protocol-24941 + local ns=proxy-protocol-24941 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n proxy-protocol-29489 proxy-protocol --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/proxy-protocol patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.75KZklFSWx ++ mktemp + local LAST_ERR=/tmp/tmp.AkB5AeX7u0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.75KZklFSWx perconaxtradbcluster.pxc.percona.com "proxy-protocol" deleted from proxy-protocol-29489 namespace + cat /tmp/tmp.AkB5AeX7u0 + rm /tmp/tmp.75KZklFSWx /tmp/tmp.AkB5AeX7u0 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.agS3lPOnTF ++ mktemp + local LAST_ERR=/tmp/tmp.h8YOdg5nLN + local exit_status=0 ++ seq 0 2 + for i 
in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.agS3lPOnTF No resources found + cat /tmp/tmp.h8YOdg5nLN + rm /tmp/tmp.agS3lPOnTF /tmp/tmp.h8YOdg5nLN + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.0NMlw8iIhB ++ mktemp + local LAST_ERR=/tmp/tmp.3UsWXdMx3o + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0NMlw8iIhB No resources found + cat /tmp/tmp.3UsWXdMx3o + rm /tmp/tmp.0NMlw8iIhB /tmp/tmp.3UsWXdMx3o + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + awk '{print$1}' + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.r53gWxFUcg ++ mktemp + local LAST_ERR=/tmp/tmp.H1Ov8P4EZ0 + local exit_status=0 + local LAST_OUT=/tmp/tmp.yjomHxiBjt ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.xp2OWzgBUA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.r53gWxFUcg + cat /tmp/tmp.H1Ov8P4EZ0 + rm /tmp/tmp.r53gWxFUcg /tmp/tmp.H1Ov8P4EZ0 + return 0 namespace "proxy-protocol-29489" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break 
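
[editor note] A minimal sketch of the kubectl_bin retry wrapper whose expansion is traced above: stdout/stderr are captured into mktemp files, the kubectl call is attempted up to three times (seq 0 2), the loop breaks on success, and both buffers are printed before the temp files are removed. This is a reconstruction from the trace, not the exact helper from e2e-tests/functions; the sleep back-off value and sending LAST_ERR to stderr are assumptions.

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        exit_status=0
        for i in $(seq 0 2); do
            set +e
            # run the real kubectl, capturing output and error separately
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0   # the trace shows 'sleep 0'; the real helper may back off differently
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
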
+ cat /tmp/tmp.yjomHxiBjt namespace "pxc-operator" deleted + cat /tmp/tmp.xp2OWzgBUA + rm /tmp/tmp.yjomHxiBjt /tmp/tmp.xp2OWzgBUA + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.JdQUjDlkJO ++ mktemp + local LAST_ERR=/tmp/tmp.j1I93jaxlo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JdQUjDlkJO namespace/pxc-operator created + cat /tmp/tmp.j1I93jaxlo + rm /tmp/tmp.JdQUjDlkJO /tmp/tmp.j1I93jaxlo + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.sd9oXBh3Sw +++ mktemp ++ local LAST_ERR=/tmp/tmp.KUbTdixPrs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sd9oXBh3Sw ++ cat /tmp/tmp.KUbTdixPrs ++ rm /tmp/tmp.sd9oXBh3Sw /tmp/tmp.KUbTdixPrs ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bUFn0KWHgl ++ mktemp + local LAST_ERR=/tmp/tmp.Q29Y2Vlt2y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bUFn0KWHgl Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1" modified. 
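
[editor note] The delete / wait_for_delete / create / set-context sequence traced above follows a namespace-recycle pattern. A hedged reconstruction under stated assumptions: the function name recreate_namespace and the one-second polling interval are illustrative, while the individual kubectl calls mirror the trace (poll until the API server answers NotFound, recreate, then switch the active context to the new namespace).

    recreate_namespace() {
        local ns=$1
        kubectl_bin delete namespace "$ns" || true
        echo -n "waiting for namespace/$ns to be deleted"
        # poll until the API server reports NotFound for the namespace
        until kubectl get namespace "$ns" 2>&1 | grep -q NotFound; do
            echo -n .
            sleep 1
        done
        echo
        kubectl_bin create namespace "$ns"
        # point the current kubeconfig context at the freshly created namespace
        kubectl_bin config set-context "$(kubectl config current-context)" --namespace="$ns"
    }
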
+ cat /tmp/tmp.Q29Y2Vlt2y + rm /tmp/tmp.bUFn0KWHgl /tmp/tmp.Q29Y2Vlt2y + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.g8VExKCw1a ++ mktemp + local LAST_ERR=/tmp/tmp.kDqQsl5FFD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.g8VExKCw1a customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.kDqQsl5FFD + rm /tmp/tmp.g8VExKCw1a /tmp/tmp.kDqQsl5FFD + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.3aQR0apHGn ++ mktemp + local LAST_ERR=/tmp/tmp.bB0LJyz9kX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3aQR0apHGn clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.bB0LJyz9kX + rm /tmp/tmp.3aQR0apHGn /tmp/tmp.bB0LJyz9kX + return 0 + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.PSiMXaOGAZ ++ mktemp + local LAST_ERR=/tmp/tmp.bnAIASdASs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PSiMXaOGAZ deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.bnAIASdASs + rm /tmp/tmp.PSiMXaOGAZ /tmp/tmp.bnAIASdASs + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + 
local LAST_OUT=/tmp/tmp.NmBdrgkbnZ ++ mktemp + local LAST_ERR=/tmp/tmp.15jMiqLAG6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NmBdrgkbnZ pod/percona-xtradb-cluster-operator-9d9fbdb5-xzkkr condition met + cat /tmp/tmp.15jMiqLAG6 E0516 20:05:59.835296 22502 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/pxc-operator/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpercona-xtradb-cluster-operator-9d9fbdb5-xzkkr&resourceVersion=1778961959474227000&timeoutSeconds=547&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" + rm /tmp/tmp.NmBdrgkbnZ /tmp/tmp.15jMiqLAG6 + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ head -1 ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.v1i0wFYKHW +++ mktemp ++ local LAST_ERR=/tmp/tmp.LVWzbhAiqA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.v1i0wFYKHW ++ cat /tmp/tmp.LVWzbhAiqA ++ rm /tmp/tmp.v1i0wFYKHW /tmp/tmp.LVWzbhAiqA ++ return 0 + wait_pod percona-xtradb-cluster-operator-9d9fbdb5-xzkkr 480 pxc-operator + local pod=percona-xtradb-cluster-operator-9d9fbdb5-xzkkr + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-9d9fbdb5-xzkkr ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-9d9fbdb5-xzkkr condition met E0516 20:06:07.585848 23590 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/pxc-operator/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpercona-xtradb-cluster-operator-9d9fbdb5-xzkkr&resourceVersion=1778961964536359000&timeoutSeconds=441&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/percona-xtradb-cluster-operator-9d9fbdb5-xzkkr to become Ready.Ok + sleep 3 + create_namespace proxy-protocol-24941 + local namespace=proxy-protocol-24941 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete 
ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces proxy-protocol-24941' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces proxy-protocol-24941 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace proxy-protocol-24941 ++ mktemp + local LAST_OUT=/tmp/tmp.UW4uz05YCK + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + xargs kubectl delete ns + kubectl_bin get ns + awk '{print$1}' ++ mktemp + local LAST_ERR=/tmp/tmp.rNPUVWbAjZ + local exit_status=0 ++ mktemp + local LAST_OUT=/tmp/tmp.0aopZ6OXC8 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-protocol-24941 + local LAST_ERR=/tmp/tmp.SBACcXfevJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0aopZ6OXC8 + cat /tmp/tmp.SBACcXfevJ + rm /tmp/tmp.0aopZ6OXC8 /tmp/tmp.SBACcXfevJ + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-protocol-24941 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-protocol-24941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.UW4uz05YCK + cat /tmp/tmp.rNPUVWbAjZ Error from server (NotFound): namespaces "proxy-protocol-24941" not found + rm /tmp/tmp.UW4uz05YCK /tmp/tmp.rNPUVWbAjZ + return 1 + : + wait_for_delete namespace/proxy-protocol-24941 + local res=namespace/proxy-protocol-24941 + echo -n 'waiting for namespace/proxy-protocol-24941 to be deleted' waiting for namespace/proxy-protocol-24941 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "proxy-protocol-24941" not found + desc 'create namespace proxy-protocol-24941' + set +o xtrace ----------------------------------------------------------------------------------- create namespace proxy-protocol-24941 ----------------------------------------------------------------------------------- + kubectl_bin create namespace proxy-protocol-24941 ++ mktemp + local LAST_OUT=/tmp/tmp.XAGtszFCUV ++ mktemp + local LAST_ERR=/tmp/tmp.YBzYXmAd9U + local exit_status=0 ++ 
seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace proxy-protocol-24941 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XAGtszFCUV namespace/proxy-protocol-24941 created + cat /tmp/tmp.YBzYXmAd9U + rm /tmp/tmp.XAGtszFCUV /tmp/tmp.YBzYXmAd9U + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BuTAKss34J +++ mktemp ++ local LAST_ERR=/tmp/tmp.VatYPCIuqA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BuTAKss34J ++ cat /tmp/tmp.VatYPCIuqA ++ rm /tmp/tmp.BuTAKss34J /tmp/tmp.VatYPCIuqA ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1 --namespace=proxy-protocol-24941 ++ mktemp + local LAST_OUT=/tmp/tmp.Ncfu0XfG78 ++ mktemp + local LAST_ERR=/tmp/tmp.DigLLokAsK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1 --namespace=proxy-protocol-24941 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ncfu0XfG78 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster1" modified. + cat /tmp/tmp.DigLLokAsK + rm /tmp/tmp.Ncfu0XfG78 /tmp/tmp.DigLLokAsK + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.V2M10ItlW6 ++ mktemp + local LAST_ERR=/tmp/tmp.Gb5grgPW8r + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.V2M10ItlW6 secret/minio-secret created secret/aws-s3-secret created secret/do-spaces-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.Gb5grgPW8r + rm /tmp/tmp.V2M10ItlW6 /tmp/tmp.Gb5grgPW8r + return 0 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc proxy-protocol /tmp/tmp.AHGqOixxgz/proxy-protocol.yml 3 15 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/secrets.yml /tmp/tmp.AHGqOixxgz/client.yml 33062 + local cluster=proxy-protocol + local config=/tmp/tmp.AHGqOixxgz/proxy-protocol.yml + local size=3 + local sleep=15 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/secrets.yml + local pxcClientFile=/tmp/tmp.AHGqOixxgz/client.yml + local port=33062 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4DpYrU7qgS ++ mktemp + local LAST_ERR=/tmp/tmp.sCyL4AkywL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4DpYrU7qgS secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.sCyL4AkywL + rm /tmp/tmp.4DpYrU7qgS /tmp/tmp.sCyL4AkywL + return 0 + apply_config /tmp/tmp.AHGqOixxgz/client.yml + local config_file=/tmp/tmp.AHGqOixxgz/client.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.AHGqOixxgz/client.yml '' + local input_file=/tmp/tmp.AHGqOixxgz/client.yml + local pvc_name= + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.proxy-protocol-24941~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + cat /tmp/tmp.AHGqOixxgz/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.zZqrcqacbS ++ mktemp + local LAST_ERR=/tmp/tmp.u1ye3oZa8g + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zZqrcqacbS deployment.apps/pxc-client created + cat /tmp/tmp.u1ye3oZa8g + rm /tmp/tmp.zZqrcqacbS /tmp/tmp.u1ye3oZa8g + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /tmp/tmp.AHGqOixxgz/proxy-protocol.yml + local config_file=/tmp/tmp.AHGqOixxgz/proxy-protocol.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.AHGqOixxgz/proxy-protocol.yml '' + local input_file=/tmp/tmp.AHGqOixxgz/proxy-protocol.yml + local pvc_name= + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.proxy-protocol-24941~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_OUT=/tmp/tmp.pzQHPKZi2y + /usr/bin/sed -e 
's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /tmp/tmp.AHGqOixxgz/proxy-protocol.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + local LAST_ERR=/tmp/tmp.TS3tFODuSw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pzQHPKZi2y perconaxtradbcluster.pxc.percona.com/proxy-protocol created + cat /tmp/tmp.TS3tFODuSw + rm /tmp/tmp.pzQHPKZi2y /tmp/tmp.TS3tFODuSw + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy proxy-protocol ++ local target_cluster=proxy-protocol +++ kubectl_bin get pxc proxy-protocol -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.phGkMsSEdK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kuBJF67s9J +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc proxy-protocol -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.phGkMsSEdK +++ cat /tmp/tmp.kuBJF67s9J +++ rm /tmp/tmp.phGkMsSEdK /tmp/tmp.kuBJF67s9J +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo proxy-protocol-haproxy ++ return + local proxy=proxy-protocol-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-protocol-24941 ++ mktemp + local LAST_OUT=/tmp/tmp.g89Nfczwdn ++ mktemp + local LAST_ERR=/tmp/tmp.PZzJeQxG8H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-protocol-24941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-protocol-24941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-protocol-24941 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.g89Nfczwdn + cat /tmp/tmp.PZzJeQxG8H error: no matching resources found + rm /tmp/tmp.g89Nfczwdn /tmp/tmp.PZzJeQxG8H + return 1 + true + wait_for_running proxy-protocol-haproxy 1 + local name=proxy-protocol-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-protocol-haproxy-0 480 + local pod=proxy-protocol-haproxy-0 + local max_retry=480 + local ns= ++ echo 
proxy-protocol-haproxy-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/proxy-protocol-haproxy-0 condition met waiting for pod/proxy-protocol-haproxy-0 to become Ready.Ok + wait_for_running proxy-protocol-pxc 3 + local name=proxy-protocol-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-protocol-pxc-0 480 + local pod=proxy-protocol-pxc-0 + local max_retry=480 + local ns= ++ echo proxy-protocol-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-protocol-pxc-0 condition met E0516 20:08:06.452275 8031 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dproxy-protocol-pxc-0&resourceVersion=1778962085946015024&timeoutSeconds=595&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/proxy-protocol-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-protocol-pxc-1 480 + local pod=proxy-protocol-pxc-1 + local max_retry=480 + local ns= ++ echo proxy-protocol-pxc-1 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/proxy-protocol-pxc-1 condition met waiting for pod/proxy-protocol-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-protocol-pxc-2 480 + local pod=proxy-protocol-pxc-2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo proxy-protocol-pxc-2 + local container=pxc + set +o xtrace pod/proxy-protocol-pxc-2 condition met waiting for pod/proxy-protocol-pxc-2 to become Ready.Ok + sleep 15 ++ kubectl get pxc proxy-protocol -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ base64 --decode ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pp501kbIKt +++ mktemp ++ local LAST_ERR=/tmp/tmp.vbj6IWgS7T ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pp501kbIKt ++ cat /tmp/tmp.vbj6IWgS7T ++ rm /tmp/tmp.pp501kbIKt /tmp/tmp.vbj6IWgS7T ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h proxy-protocol-haproxy -uroot -p'\''root_password'\'' -P33062' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h proxy-protocol-haproxy -uroot 
-p'\''root_password'\'' -P33062' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2onQkq50o2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CKwfqHDOaO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2onQkq50o2 ++ cat /tmp/tmp.CKwfqHDOaO ++ rm /tmp/tmp.2onQkq50o2 /tmp/tmp.CKwfqHDOaO ++ return 0 + client_pod=pxc-client-5cc4fc98d5-gc4kz + wait_pod pxc-client-5cc4fc98d5-gc4kz + local pod=pxc-client-5cc4fc98d5-gc4kz + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-5cc4fc98d5-gc4kz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:11:00.096703 32069 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962257714420000&timeoutSeconds=568&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h proxy-protocol-haproxy -uroot -p'\''root_password'\'' -P33062' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h proxy-protocol-haproxy -uroot -p'\''root_password'\'' -P33062' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gboZ2bX6KR +++ mktemp ++ local LAST_ERR=/tmp/tmp.9g3sDRHDtj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gboZ2bX6KR ++ cat /tmp/tmp.9g3sDRHDtj ++ rm /tmp/tmp.gboZ2bX6KR /tmp/tmp.9g3sDRHDtj ++ return 0 + client_pod=pxc-client-5cc4fc98d5-gc4kz + wait_pod pxc-client-5cc4fc98d5-gc4kz + local pod=pxc-client-5cc4fc98d5-gc4kz + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-5cc4fc98d5-gc4kz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:11:13.746183 1698 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962271327022000&timeoutSeconds=572&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-0.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-0.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local postfix= + local 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-0.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-0.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.larDBmuY6a +++ mktemp ++ local LAST_ERR=/tmp/tmp.BRZZdSiySy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.larDBmuY6a ++ cat /tmp/tmp.BRZZdSiySy ++ rm /tmp/tmp.larDBmuY6a /tmp/tmp.BRZZdSiySy ++ return 0 + client_pod=pxc-client-5cc4fc98d5-gc4kz + wait_pod pxc-client-5cc4fc98d5-gc4kz + local pod=pxc-client-5cc4fc98d5-gc4kz + local max_retry=480 + local ns= ++ echo pxc-client-5cc4fc98d5-gc4kz ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:11:56.291755 7838 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962314034194000&timeoutSeconds=322&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.AHGqOixxgz/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.AHGqOixxgz/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql /tmp/tmp.AHGqOixxgz/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-1.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-1.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-1.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-1.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fWHWBLQMqx +++ mktemp ++ local LAST_ERR=/tmp/tmp.dgBxwgfjBs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fWHWBLQMqx ++ cat /tmp/tmp.dgBxwgfjBs ++ rm /tmp/tmp.fWHWBLQMqx /tmp/tmp.dgBxwgfjBs ++ return 0 + client_pod=pxc-client-5cc4fc98d5-gc4kz + wait_pod pxc-client-5cc4fc98d5-gc4kz + local pod=pxc-client-5cc4fc98d5-gc4kz + local max_retry=480 + local ns= ++ echo pxc-client-5cc4fc98d5-gc4kz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:12:06.163107 9285 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962324140534000&timeoutSeconds=557&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.AHGqOixxgz/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.AHGqOixxgz/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql /tmp/tmp.AHGqOixxgz/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-2.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-2.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-protocol-pxc-2.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-protocol-pxc-2.proxy-protocol-pxc -uroot -p'\''root_password'\'' -P33062' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8lI1BbzriK +++ mktemp ++ local LAST_ERR=/tmp/tmp.e12QCDAFAO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8lI1BbzriK ++ cat /tmp/tmp.e12QCDAFAO ++ rm /tmp/tmp.8lI1BbzriK /tmp/tmp.e12QCDAFAO ++ return 0 + client_pod=pxc-client-5cc4fc98d5-gc4kz + wait_pod pxc-client-5cc4fc98d5-gc4kz + local pod=pxc-client-5cc4fc98d5-gc4kz + local max_retry=480 + local ns= ++ echo pxc-client-5cc4fc98d5-gc4kz ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:12:16.363459 10621 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962334337034000&timeoutSeconds=524&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.AHGqOixxgz/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.AHGqOixxgz/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/select-1.sql /tmp/tmp.AHGqOixxgz/select-1.sql + is_keyring_plugin_in_use proxy-protocol + local cluster=proxy-protocol + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + grep -E -o 'early-plugin-load=keyring_\w+.so' + kubectl exec proxy-protocol-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' + return 1 + sleep 120 ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jRhApXgXYY +++ mktemp ++ local LAST_ERR=/tmp/tmp.2mfoi6PKfU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jRhApXgXYY ++ cat /tmp/tmp.2mfoi6PKfU ++ rm /tmp/tmp.jRhApXgXYY /tmp/tmp.2mfoi6PKfU ++ return 0 + client_ip=10.132.177.35 + '[' 0 -eq 1 ']' + [[ -n '' ]] + retry=0 + service=proxy-protocol-haproxy + get_service_endpoint proxy-protocol-haproxy + local service=proxy-protocol-haproxy ++ kubectl_bin get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EJ8SZ5YeKi +++ mktemp ++ local LAST_ERR=/tmp/tmp.9HEQWQH80g ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EJ8SZ5YeKi ++ cat /tmp/tmp.9HEQWQH80g ++ rm /tmp/tmp.EJ8SZ5YeKi /tmp/tmp.9HEQWQH80g ++ return 0 + local endpoint= + '[' -z '' ']' ++ kubectl_bin get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Htdz0RR7gs +++ mktemp ++ local LAST_ERR=/tmp/tmp.d1AAnku3V7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Htdz0RR7gs ++ cat /tmp/tmp.d1AAnku3V7 ++ rm /tmp/tmp.Htdz0RR7gs /tmp/tmp.d1AAnku3V7 ++ return 0 + endpoint=34.123.185.98 + '[' -n 34.123.185.98 ']' + '[' 34.123.185.98 '!=' null ']' + sed -e 's/^"//; s/"$//;' + echo 34.123.185.98 + head -n 1 + return ++ get_service_endpoint proxy-protocol-haproxy ++ local service=proxy-protocol-haproxy +++ kubectl_bin get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nuG4ldfqR7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hQDpbnHVl7 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.nuG4ldfqR7 +++ cat /tmp/tmp.hQDpbnHVl7 +++ rm /tmp/tmp.nuG4ldfqR7 /tmp/tmp.hQDpbnHVl7 +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/proxy-protocol-haproxy -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2VCFq8JoSO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dwdeDmrsWi +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/proxy-protocol-haproxy -o 
'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.2VCFq8JoSO +++ cat /tmp/tmp.dwdeDmrsWi +++ rm /tmp/tmp.2VCFq8JoSO /tmp/tmp.dwdeDmrsWi +++ return 0 ++ endpoint=34.123.185.98 ++ '[' -n 34.123.185.98 ']' ++ '[' 34.123.185.98 '!=' null ']' ++ head -n 1 ++ echo 34.123.185.98 ++ sed -e 's/^"//; s/"$//;' ++ return + service_ip=34.123.185.98 + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/proxy-protocol-pxc + local resource=statefulset/proxy-protocol-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc.yml + local new_result=/tmp/tmp.AHGqOixxgz/statefulset_proxy-protocol-pxc.yml + desc 'compare statefulset/proxy-protocol-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/proxy-protocol-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k133.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k129.yml ']' + version_gt 1.27 ++ bc -l ++ echo '1.33 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127-oc.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/proxy-protocol-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. 
| select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("proxy-protocol-24941", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.ukAwkL7tHl ++ mktemp + local LAST_ERR=/tmp/tmp.UCCl4PvQym + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/proxy-protocol-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ukAwkL7tHl + cat /tmp/tmp.UCCl4PvQym + rm /tmp/tmp.ukAwkL7tHl /tmp/tmp.UCCl4PvQym + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-pxc-k127.yml /tmp/tmp.AHGqOixxgz/statefulset_proxy-protocol-pxc.yml + log 'compare_kubectl: statefulset/proxy-protocol-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:14:34+0000]' compare_kubectl: statefulset/proxy-protocol-pxc OK [2026-05-16T20:14:34+0000] compare_kubectl: statefulset/proxy-protocol-pxc OK + compare_kubectl statefulset/proxy-protocol-haproxy + local resource=statefulset/proxy-protocol-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy.yml + local new_result=/tmp/tmp.AHGqOixxgz/statefulset_proxy-protocol-haproxy.yml + desc 'compare statefulset/proxy-protocol-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/proxy-protocol-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/proxy-protocol-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("proxy-protocol-24941", "namespace") | ++ mktemp (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.pAXVY0HUoq ++ mktemp + local LAST_ERR=/tmp/tmp.SDMTwxr6mO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/proxy-protocol-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pAXVY0HUoq + cat /tmp/tmp.SDMTwxr6mO + rm /tmp/tmp.pAXVY0HUoq /tmp/tmp.SDMTwxr6mO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/statefulset_proxy-protocol-haproxy-k127.yml /tmp/tmp.AHGqOixxgz/statefulset_proxy-protocol-haproxy.yml + log 'compare_kubectl: statefulset/proxy-protocol-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:14:35+0000]' compare_kubectl: statefulset/proxy-protocol-haproxy OK [2026-05-16T20:14:35+0000] compare_kubectl: statefulset/proxy-protocol-haproxy OK + compare_kubectl service/proxy-protocol-pxc + local resource=service/proxy-protocol-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc.yml + local new_result=/tmp/tmp.AHGqOixxgz/service_proxy-protocol-pxc.yml + desc 'compare service/proxy-protocol-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/proxy-protocol-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 
1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k127.yml ']' + version_gt 1.24 ++ echo '1.33 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k124.yml ']' + version_gt 1.22 ++ echo '1.33 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122-oc.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122-aks.yml ']' + kubectl_bin get -o yaml service/proxy-protocol-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("proxy-protocol-24941", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.TjAwUapwOm ++ mktemp + local LAST_ERR=/tmp/tmp.YCMY8iz7I1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/proxy-protocol-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TjAwUapwOm + cat /tmp/tmp.YCMY8iz7I1 + rm /tmp/tmp.TjAwUapwOm /tmp/tmp.YCMY8iz7I1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-pxc-k122.yml /tmp/tmp.AHGqOixxgz/service_proxy-protocol-pxc.yml + log 'compare_kubectl: service/proxy-protocol-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:14:36+0000]' compare_kubectl: service/proxy-protocol-pxc OK [2026-05-16T20:14:36+0000] compare_kubectl: service/proxy-protocol-pxc OK + compare_kubectl service/proxy-protocol-haproxy + local resource=service/proxy-protocol-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy.yml + local new_result=/tmp/tmp.AHGqOixxgz/service_proxy-protocol-haproxy.yml + desc 'compare service/proxy-protocol-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/proxy-protocol-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k127.yml ']' + version_gt 1.24 ++ bc -l ++ echo '1.33 >= 1.24' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k124.yml ']' + version_gt 1.22 ++ echo '1.33 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122-oc.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122-aks.yml ']' + kubectl_bin get -o yaml service/proxy-protocol-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("proxy-protocol-24941", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.tyMiuybbov ++ mktemp + local LAST_ERR=/tmp/tmp.iPUvwpVWao + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/proxy-protocol-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tyMiuybbov + cat /tmp/tmp.iPUvwpVWao + rm /tmp/tmp.tyMiuybbov /tmp/tmp.iPUvwpVWao + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-k122.yml /tmp/tmp.AHGqOixxgz/service_proxy-protocol-haproxy.yml + log 'compare_kubectl: service/proxy-protocol-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:14:38+0000]' compare_kubectl: service/proxy-protocol-haproxy OK [2026-05-16T20:14:38+0000] compare_kubectl: service/proxy-protocol-haproxy OK + compare_kubectl service/proxy-protocol-haproxy-replicas + local resource=service/proxy-protocol-haproxy-replicas + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas.yml + local new_result=/tmp/tmp.AHGqOixxgz/service_proxy-protocol-haproxy-replicas.yml + desc 'compare service/proxy-protocol-haproxy-replicas-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/proxy-protocol-haproxy-replicas- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k127.yml ']' + version_gt 1.24 ++ echo '1.33 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k124.yml ']' + version_gt 1.22 ++ echo '1.33 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122-oc.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122-aks.yml ']' + kubectl_bin get -o yaml service/proxy-protocol-haproxy-replicas ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("proxy-protocol-24941", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.1QNQFRV9yJ ++ mktemp + local LAST_ERR=/tmp/tmp.rpyEc0ucH2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/proxy-protocol-haproxy-replicas + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1QNQFRV9yJ + cat /tmp/tmp.rpyEc0ucH2 + rm /tmp/tmp.1QNQFRV9yJ /tmp/tmp.rpyEc0ucH2 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/compare/service_proxy-protocol-haproxy-replicas-k122.yml /tmp/tmp.AHGqOixxgz/service_proxy-protocol-haproxy-replicas.yml + log 'compare_kubectl: service/proxy-protocol-haproxy-replicas OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:14:38+0000]' compare_kubectl: service/proxy-protocol-haproxy-replicas OK [2026-05-16T20:14:38+0000] compare_kubectl: service/proxy-protocol-haproxy-replicas OK + desc 'check if client ip visible in mysql processlist' + set +o xtrace ----------------------------------------------------------------------------------- check if client ip visible in mysql processlist ----------------------------------------------------------------------------------- ++ run_mysql 'show processlist;' '-h 34.123.185.98 -uroot -proot_password' ++ local 'command=show processlist;' ++ local 'uri=-h 34.123.185.98 -uroot -proot_password' ++ grep -c 10.132.177.35 ++ grep 'show processlist' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BZrGKULfFG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KRFQK1O7GS +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BZrGKULfFG +++ cat /tmp/tmp.KRFQK1O7GS +++ rm /tmp/tmp.BZrGKULfFG /tmp/tmp.KRFQK1O7GS +++ return 0 ++ client_pod=pxc-client-5cc4fc98d5-gc4kz ++ wait_pod pxc-client-5cc4fc98d5-gc4kz ++ local pod=pxc-client-5cc4fc98d5-gc4kz ++ local max_retry=480 ++ local ns= +++ echo pxc-client-5cc4fc98d5-gc4kz +++ grep -E '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-5cc4fc98d5-gc4kz condition met E0516 20:14:41.151316 26155 reflector.go:227] "Failed to watch" err="Get \"https://34.28.179.30/api/v1/namespaces/proxy-protocol-24941/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-5cc4fc98d5-gc4kz&resourceVersion=1778962479604201000&timeoutSeconds=508&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-5cc4fc98d5-gc4kz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + nr_queries=1 + '[' 1 -ne 1 ']' + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/conf/proxy-protocol.yml ++ mktemp + local LAST_OUT=/tmp/tmp.VYtWHiueN8 ++ mktemp + local LAST_ERR=/tmp/tmp.UryhpkkzV2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/proxy-protocol/conf/proxy-protocol.yml 
+ exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VYtWHiueN8 perconaxtradbcluster.pxc.percona.com "proxy-protocol" deleted from proxy-protocol-24941 namespace + cat /tmp/tmp.UryhpkkzV2 Warning: pxc.percona.com/v1-11-0 PerconaXtraDBCluster is deprecated and will be removed in v1.16.0; see v1.12.0 release notes for instructions to migrate to pxc.percona.com/v1 + rm /tmp/tmp.VYtWHiueN8 /tmp/tmp.UryhpkkzV2 + return 0 + destroy proxy-protocol-24941 + local namespace=proxy-protocol-24941 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + tee /tmp/tmp.AHGqOixxgz/operator.log + sort -u + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v 'the object has been modified' + grep -v level=info ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.TuDsEvvgGP +++ mktemp ++ local LAST_ERR=/tmp/tmp.KAHVF21dOV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TuDsEvvgGP ++ cat /tmp/tmp.KAHVF21dOV ++ rm /tmp/tmp.TuDsEvvgGP /tmp/tmp.KAHVF21dOV ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-9d9fbdb5-xzkkr ++ mktemp + local LAST_OUT=/tmp/tmp.35CasBNtcF ++ mktemp + local LAST_ERR=/tmp/tmp.SDbDVWneF4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-9d9fbdb5-xzkkr + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.35CasBNtcF + cat /tmp/tmp.SDbDVWneF4 + rm /tmp/tmp.35CasBNtcF /tmp/tmp.SDbDVWneF4 + return 0 2026-05-16T20:05:47.830Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2026-05-16T20:05:47.830Z INFO setup Manager starting up {"gitCommit": "3dc7f023721e421071d4d2126e295ac1467895dd", "gitBranch": "PR-2467-3dc7f023", "buildTime": "2026-05-16T18:00:40Z", "goVersion": "go1.26.3", "os": "linux", "arch": "amd64"} 2026-05-16T20:05:47.830Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.33.11-gke.1137000"} 2026-05-16T20:05:47.833Z INFO setup Registering Components. 
2026-05-16T20:05:48.262Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2026-05-16T20:05:48.263Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2026-05-16T20:05:48.263Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2026-05-16T20:05:48.263Z INFO controller-runtime.metrics Starting metrics server 2026-05-16T20:05:48.263Z INFO controller-runtime.webhook Starting webhook server 2026-05-16T20:05:48.263Z INFO setup Starting the Cmd. 2026-05-16T20:05:48.263Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2026-05-16T20:05:48.264Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2026-05-16T20:05:48.264Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2026-05-16T20:05:48.364Z INFO Attempting to acquire leader lease... {"lock": "pxc-operator/08db1feb.percona.com"} 2026-05-16T20:05:48.387Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2026-05-16T20:05:48.388Z DEBUG events percona-xtradb-cluster-operator-9d9fbdb5-xzkkr_c7794c83-dce5-4268-9fe3-0a773d6c8660 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"f363e26c-6ccd-4e1d-b089-1f15740d6a46","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1778961948382143009"}, "reason": "LeaderElection"} 2026-05-16T20:05:48.388Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2026-05-16T20:05:48.388Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2026-05-16T20:05:48.388Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2026-05-16T20:05:48.388Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2026-05-16T20:05:48.590Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2026-05-16T20:05:48.590Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2026-05-16T20:05:48.590Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2026-05-16T20:05:48.590Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2026-05-16T20:05:48.591Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2026-05-16T20:05:48.591Z INFO Starting workers {"controller": 
"pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2026-05-16T20:07:05.755Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "version": "1.20.0"} 2026-05-16T20:07:06.920Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2026-05-16T20:07:07.038Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-05-16T20:07:07.078Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-05-16T20:07:07.147Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T20:07:07.183Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T20:07:07.293Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T20:07:07.788Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": 
"c190ddd4-9f1e-4d9d-b6c0-d557aea46d2a", "object": "proxy-protocol-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T20:07:08.569Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "e76c3535-c562-42b7-9d65-f345ad127b23", "object": "proxy-protocol-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-05-16T20:07:08.593Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "e76c3535-c562-42b7-9d65-f345ad127b23", "object": "proxy-protocol-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-05-16T20:07:08.642Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "e76c3535-c562-42b7-9d65-f345ad127b23", "object": "proxy-protocol-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}", "hashChanged": false, "metaChanged": true} 2026-05-16T20:07:59.971Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc", "user": "operator"} 2026-05-16T20:08:00.010Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc", "user": "monitor"} 2026-05-16T20:08:00.062Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc"} 2026-05-16T20:08:00.100Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc"} 2026-05-16T20:08:00.134Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc", "user": "xtrabackup"} 2026-05-16T20:08:00.175Z 
INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc"} 2026-05-16T20:08:00.228Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "0f976b44-4aae-4302-84a1-e2cfbe4d24cc", "user": "replication"} 2026-05-16T20:10:36.320Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "3b24f1a1-e48d-4cdf-ad83-65831bba25d5", "user": "root"} 2026-05-16T20:10:36.459Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "3b24f1a1-e48d-4cdf-ad83-65831bba25d5", "new version": "8.0.43-34.1"} 2026-05-16T20:15:21.971Z ERROR Update status {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-protocol","namespace":"proxy-protocol-24941"}, "namespace": "proxy-protocol-24941", "name": "proxy-protocol", "reconcileID": "842a2f04-706e-460c-9e05-c2630950301f", "error": "PerconaXtraDBCluster.pxc.percona.com \"proxy-protocol\" not found"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile.func1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.24.0/pkg/internal/controller/controller.go:221 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.24.0/pkg/internal/controller/controller.go:312 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.24.0/pkg/internal/controller/controller.go:437 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.24.0/pkg/internal/controller/controller.go:478 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:259 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:489 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 -  }, -  { -  }, +  AllocateLoadBalancerNodePorts: nil, -  AllocateLoadBalancerNodePorts: &true, -  APIVersion: "v1", -  APIVersion: "v1", -  "cloud.google.com/neg": `{"ingress":true}`, +  ClusterIPs: nil, -  ClusterIPs: []string{"34.118.230.73"}, +  CreationTimestamp: v1.Time{}, -  CreationTimestamp: v1.Time{Time: s"2026-05-16 20:07:07 +0000 UTC"}, -  FieldsType: "FieldsV1", -  
FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  FieldsV1: s`{"f:metadata":{"f:finalizers":{".":{},"v:\"service.kubernetes.io`..., +  Finalizers: nil, -  Finalizers: []string{"service.kubernetes.io/load-balancer-cleanup"}, +  IPFamilies: nil, -  IPFamilies: []v1.IPFamily{"IPv4"}, +  IPFamilyPolicy: nil, -  IPFamilyPolicy: &"SingleStack", +  ManagedFields: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  Manager: "cloud-controller-manager", -  Manager: "percona-xtradb-cluster-operator", +  NodePort: 0, -  NodePort: 31047, -  NodePort: 31780, -  NodePort: 32025, -  NodePort: 32094, -  NodePort: 32254, -  Operation: "Update", -  Operation: "Update", +  Protocol: "", -  Protocol: "TCP", +  ResourceVersion: "", -  ResourceVersion: "1778962027830815021", +  SessionAffinity: "", -  SessionAffinity: "None", -  Subresource: "status", -  Time: s"2026-05-16 20:07:07 +0000 UTC", -  Time: s"2026-05-16 20:07:07 +0000 UTC", +  UID: "", -  UID: "fbcb7f91-cc8d-4c42-ab8b-0947a9e3e29e",   }    },    },    {    },    ... // 2 identical fields    ... // 3 identical fields    Annotations: map[string]string{    AppProtocol: nil,    ClusterIP: "34.118.230.73",    DeletionGracePeriodSeconds: nil,    DeletionTimestamp: nil,    ExternalIPs: nil,    Generation: 0,    InternalTrafficPolicy: &"Cluster",    Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "proxy-protocol", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    LoadBalancerClass: nil,    LoadBalancerIP: "",    LoadBalancerSourceRanges: nil,    Name: "mysql",    Name: "mysql-admin",    Name: "mysqlx",    Name: "proxy-protocol",    Namespace: "proxy-protocol-24941",    Name: "stats",    ObjectMeta: v1.ObjectMeta{    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "proxy-protocol", UID: "dfa9f411-537a-42f7-b86f-3a59b0342efd", ...}},    "percona.com/last-config-hash": "eyJwb3J0cyI6W3sibmFtZSI6Im15c3FsIiwicG9ydCI6MzMwNiwidGFyZ2V0UG9y"...,    Port: 3306,    Port: 33060,    Port: 33062,    Port: 3309,    Port: 8404,    Ports: []v1.ServicePort{    PublishNotReadyAddresses: false,    Selector: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "proxy-protocol", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    SelfLink: "",    "service.beta.kubernetes.io/aws-load-balancer-type": "nlb",    SessionAffinityConfig: nil,    Spec: v1.ServiceSpec{    Status: {},    TargetPort: {IntVal: 3306},    TargetPort: {IntVal: 33060},    TargetPort: {IntVal: 33062},    TargetPort: {IntVal: 3309},    TargetPort: {IntVal: 8404},    TrafficDistribution: nil,    Type: "LoadBalancer",    TypeMeta: {Kind: "Service", APIVersion: "v1"},   &v1.Service{ + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.gSB86Vc43G ++ mktemp + local LAST_ERR=/tmp/tmp.EHBQ7IsI7x + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat 
/tmp/tmp.gSB86Vc43G No resources found + cat /tmp/tmp.EHBQ7IsI7x + rm /tmp/tmp.gSB86Vc43G /tmp/tmp.EHBQ7IsI7x + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.C4vzvHgcLj ++ mktemp + local LAST_ERR=/tmp/tmp.fTjXP0yxji + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.C4vzvHgcLj No resources found + cat /tmp/tmp.fTjXP0yxji + rm /tmp/tmp.C4vzvHgcLj /tmp/tmp.fTjXP0yxji + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.CBs8GP7JKv ++ mktemp + local LAST_ERR=/tmp/tmp.xBQxbehDWT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CBs8GP7JKv No resources found + cat /tmp/tmp.xBQxbehDWT + rm /tmp/tmp.CBs8GP7JKv /tmp/tmp.xBQxbehDWT + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.1HyI7SSWTh ++ mktemp + local LAST_ERR=/tmp/tmp.P1i98OQzmw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1HyI7SSWTh validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.P1i98OQzmw + rm /tmp/tmp.1HyI7SSWTh /tmp/tmp.P1i98OQzmw + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.AHGqOixxgz + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator + kubectl_bin delete --grace-period=0 --force=true namespace proxy-protocol-24941 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.4BmRApXxf8 ++ mktemp + local LAST_OUT=/tmp/tmp.QaZ41211jB + local LAST_ERR=/tmp/tmp.y0stxlCqeK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace proxy-protocol-24941 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.ulnZ8d87hX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator
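Almost every command in this trace runs through a kubectl_bin-style wrapper, which is what produces the recurring mktemp, LAST_OUT/LAST_ERR, seq 0 2 and set +e/set -e lines. A minimal sketch of that pattern as it can be read from the trace; the back-off and the exact error handling of the real helper may differ.

# Sketch: retry wrapper around kubectl, reconstructed from the trace above.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                     # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 1                             # assumed back-off between retries
        else
            break                               # first success wins
        fi
    done
    cat "$LAST_OUT"                             # replay captured stdout
    cat "$LAST_ERR" >&2                         # and stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Used as, for example, kubectl_bin delete pxc --all --all-namespaces, so a transient API error does not fail the whole run on the first try.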
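During destroy, the operator log is saved for post-mortem debugging with timestamps stripped and known-noisy lines removed, as seen in the sed/grep/sort pipeline above. A condensed sketch of that step; the namespace, label selector and filters are the ones visible in this run, and the pipeline order is simplified.

# Sketch: capture a de-noised copy of the operator log before teardown.
NS=pxc-operator
TMPDIR=/tmp/tmp.AHGqOixxgz                      # temp dir created by the test

# Pick one Running operator pod that is not being deleted.
OP_POD=$(kubectl get pods -n "$NS" \
    --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
    --field-selector=status.phase=Running -o json \
    | jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' \
    | head -1)

# Drop timestamps and known-noisy messages, de-duplicate, and keep a copy.
kubectl logs -n "$NS" "$OP_POD" \
    | sed -r 's/"ts":[0-9.]+//' \
    | grep -v 'get backup status: Job.batch' \
    | grep -v 'the object has been modified' \
    | grep -v level=info \
    | sort -u \
    | tee "$TMPDIR/operator.log"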
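The run ends with the teardown shown above: finalizers are cleared from any remaining PXC custom resources so deletion cannot hang, the CRs and cluster-scoped leftovers are removed, and both namespaces are force-deleted. A compact sketch of that sequence, using the resource names and cert-manager manifest URL from this run.

# Sketch: end-of-test teardown. Clearing finalizers first keeps
# 'kubectl delete' from blocking on a stuck custom resource.
kubectl get pxc --all-namespaces -o wide | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' || true

kubectl delete pxc --all --all-namespaces
kubectl delete pxc-backup --all --all-namespaces
kubectl delete pxc-restore --all --all-namespaces

# Cluster-scoped leftovers: the admission webhook and cert-manager.
kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.yaml || true

# Finally drop both namespaces without waiting for graceful termination.
kubectl delete --grace-period=0 --force=true namespace pxc-operator
kubectl delete --grace-period=0 --force=true namespace proxy-protocol-24941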