Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/logs/proxy-switch-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra proxy-switch-7169 + local ns=proxy-switch-7169 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n proxy-switch-16511 proxy-switch --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/proxy-switch patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.OvxN1DOcDq ++ mktemp + local LAST_ERR=/tmp/tmp.79H6dj23T3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OvxN1DOcDq perconaxtradbcluster.pxc.percona.com "proxy-switch" deleted from proxy-switch-16511 namespace + cat /tmp/tmp.79H6dj23T3 + rm /tmp/tmp.OvxN1DOcDq /tmp/tmp.79H6dj23T3 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.SACzsC5Rff ++ mktemp + local LAST_ERR=/tmp/tmp.SwszNgdZDS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SACzsC5Rff No resources found + cat /tmp/tmp.SwszNgdZDS + rm /tmp/tmp.SACzsC5Rff /tmp/tmp.SwszNgdZDS + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.QdJV2Kqrpy ++ mktemp + local LAST_ERR=/tmp/tmp.7hC8WUbuTd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QdJV2Kqrpy No resources found + cat /tmp/tmp.7hC8WUbuTd + rm /tmp/tmp.QdJV2Kqrpy /tmp/tmp.7hC8WUbuTd + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl 
delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + kubectl_bin get ns + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.hzLhP5T2MD ++ mktemp + local LAST_OUT=/tmp/tmp.lpianx86wu ++ mktemp + local LAST_ERR=/tmp/tmp.cfU0m4uSNx + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.3p5cVJLa40 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lpianx86wu + cat /tmp/tmp.3p5cVJLa40 + rm /tmp/tmp.lpianx86wu /tmp/tmp.3p5cVJLa40 + return 0 namespace "proxy-switch-16511" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hzLhP5T2MD namespace "pxc-operator" deleted + cat /tmp/tmp.cfU0m4uSNx + rm /tmp/tmp.hzLhP5T2MD /tmp/tmp.cfU0m4uSNx + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.evmWsYlhBz ++ mktemp + local LAST_ERR=/tmp/tmp.arnulVqjuo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.evmWsYlhBz namespace/pxc-operator created + cat /tmp/tmp.arnulVqjuo + rm /tmp/tmp.evmWsYlhBz /tmp/tmp.arnulVqjuo + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.uNJyIjXh95 +++ mktemp ++ local LAST_ERR=/tmp/tmp.V6PdwQykRA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uNJyIjXh95 ++ cat /tmp/tmp.V6PdwQykRA ++ rm /tmp/tmp.uNJyIjXh95 /tmp/tmp.V6PdwQykRA ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.nybytQKjdA ++ mktemp + local LAST_ERR=/tmp/tmp.EASOlx8WMY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nybytQKjdA Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7" modified. 
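
Two patterns in the cleanup above are worth calling out. Stuck PerconaXtraDBCluster objects are released by clearing metadata.finalizers with a merge patch before the bulk delete, and every kubectl call runs through a kubectl_bin wrapper that captures stdout/stderr into mktemp files and retries up to three times. A minimal sketch of both, reconstructed from their expansion in the trace (the actual test library source may differ in detail):

    # Clear finalizers so a wedged custom resource can actually be deleted:
    kubectl patch pxc -n "$ns" "$name" --type=merge -p '{"metadata":{"finalizers":[]}}'

    # Retry wrapper as it appears when expanded by `set -x`: three attempts,
    # stdout/stderr captured to temp files and echoed back at the end.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            sleep 0
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
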
+ cat /tmp/tmp.EASOlx8WMY + rm /tmp/tmp.nybytQKjdA /tmp/tmp.EASOlx8WMY + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TF5ItTvPry ++ mktemp + local LAST_ERR=/tmp/tmp.2rt91ClsOm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TF5ItTvPry customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.2rt91ClsOm + rm /tmp/tmp.TF5ItTvPry /tmp/tmp.2rt91ClsOm + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/deploy/cw-rbac.yaml + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.1nMV37IFJn ++ mktemp + local LAST_ERR=/tmp/tmp.PDZf25pa2T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1nMV37IFJn clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.PDZf25pa2T + rm /tmp/tmp.1nMV37IFJn /tmp/tmp.PDZf25pa2T + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2261-65dc3545^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3C6UDEciZN ++ mktemp + local LAST_ERR=/tmp/tmp.VDsvCgiRd7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3C6UDEciZN deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.VDsvCgiRd7 + rm /tmp/tmp.3C6UDEciZN /tmp/tmp.VDsvCgiRd7 + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.L7nf1b99Fe ++ mktemp + local LAST_ERR=/tmp/tmp.qadva50W7v + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l 
app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.L7nf1b99Fe pod/percona-xtradb-cluster-operator-b5ccc7b5f-k55bf condition met + cat /tmp/tmp.qadva50W7v + rm /tmp/tmp.L7nf1b99Fe /tmp/tmp.qadva50W7v + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.XEYBNTZpZ7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.F91jn4HJbF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XEYBNTZpZ7 ++ cat /tmp/tmp.F91jn4HJbF ++ rm /tmp/tmp.XEYBNTZpZ7 /tmp/tmp.F91jn4HJbF ++ return 0 + wait_pod percona-xtradb-cluster-operator-b5ccc7b5f-k55bf 480 pxc-operator + local pod=percona-xtradb-cluster-operator-b5ccc7b5f-k55bf + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-b5ccc7b5f-k55bf ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-b5ccc7b5f-k55bf condition met waiting for pod/percona-xtradb-cluster-operator-b5ccc7b5f-k55bf to become Ready.Ok + sleep 3 + create_namespace proxy-switch-7169 + local namespace=proxy-switch-7169 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v 
'^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces proxy-switch-7169' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces proxy-switch-7169 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace proxy-switch-7169 + kubectl_bin get ns ++ mktemp ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.HdNYbhAxT8 + local LAST_OUT=/tmp/tmp.QCyT3ehyho ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.JJ7OgQNVj7 + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.6WaggCqk7b + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-switch-7169 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-switch-7169 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QCyT3ehyho + cat /tmp/tmp.6WaggCqk7b + rm /tmp/tmp.QCyT3ehyho /tmp/tmp.6WaggCqk7b + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace proxy-switch-7169 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.HdNYbhAxT8 + cat /tmp/tmp.JJ7OgQNVj7 Error from server (NotFound): namespaces "proxy-switch-7169" not found + rm /tmp/tmp.HdNYbhAxT8 /tmp/tmp.JJ7OgQNVj7 + return 1 + : + wait_for_delete namespace/proxy-switch-7169 + local res=namespace/proxy-switch-7169 + echo -n 'waiting for namespace/proxy-switch-7169 to be deleted' waiting for namespace/proxy-switch-7169 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "proxy-switch-7169" not found + desc 'create namespace proxy-switch-7169' + set +o xtrace ----------------------------------------------------------------------------------- create namespace proxy-switch-7169 ----------------------------------------------------------------------------------- + kubectl_bin create namespace proxy-switch-7169 ++ mktemp + local LAST_OUT=/tmp/tmp.loY1dfI4V2 ++ mktemp + local LAST_ERR=/tmp/tmp.oJaJ1pmMAN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace proxy-switch-7169 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.loY1dfI4V2 namespace/proxy-switch-7169 created + cat /tmp/tmp.oJaJ1pmMAN + rm /tmp/tmp.loY1dfI4V2 /tmp/tmp.oJaJ1pmMAN + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.h3u0JtHkHM +++ mktemp ++ local LAST_ERR=/tmp/tmp.IrgS5pONRG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.h3u0JtHkHM ++ cat /tmp/tmp.IrgS5pONRG ++ rm /tmp/tmp.h3u0JtHkHM /tmp/tmp.IrgS5pONRG ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7 --namespace=proxy-switch-7169 ++ mktemp + local LAST_OUT=/tmp/tmp.lRqWtMe9fI ++ mktemp + local LAST_ERR=/tmp/tmp.yI7V8arhwy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7 --namespace=proxy-switch-7169 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lRqWtMe9fI Context 
"gke_cloud-dev-112233_us-central1-a_jen-pxc-2261-65dc3545-7-cluster7" modified. + cat /tmp/tmp.yI7V8arhwy + rm /tmp/tmp.lRqWtMe9fI /tmp/tmp.yI7V8arhwy + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qHjq9vvbyG ++ mktemp + local LAST_ERR=/tmp/tmp.9iyZUaW1rh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qHjq9vvbyG secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.9iyZUaW1rh + rm /tmp/tmp.qHjq9vvbyG /tmp/tmp.9iyZUaW1rh + return 0 + desc 'create PXC cluster with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster with HAProxy ----------------------------------------------------------------------------------- + cluster=proxy-switch + spinup_pxc proxy-switch /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml 3 10 + local cluster=proxy-switch + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.OXZkvxuSvl ++ mktemp + local LAST_ERR=/tmp/tmp.RhBXyj6VVf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OXZkvxuSvl secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.RhBXyj6VVf + rm /tmp/tmp.OXZkvxuSvl /tmp/tmp.RhBXyj6VVf + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + 
/usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.WRFQZHww5S + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.proxy-switch-7169~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + local LAST_ERR=/tmp/tmp.sWcKv2XDBg + local exit_status=0 + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2261-65dc3545#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WRFQZHww5S deployment.apps/pxc-client created + cat /tmp/tmp.sWcKv2XDBg + rm /tmp/tmp.WRFQZHww5S /tmp/tmp.sWcKv2XDBg + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/conf/proxy-switch.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.VDA1uaJGhH + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2261-65dc3545#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.proxy-switch-7169~ ++ mktemp + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + local LAST_ERR=/tmp/tmp.JDLHw0aiLb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VDA1uaJGhH perconaxtradbcluster.pxc.percona.com/proxy-switch created 
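
The manifests applied above never reach the API server verbatim. The CRDs are installed with server-side apply so repeated CI runs do not conflict over field ownership, and cat_config pipes each YAML file through a chain of sed expressions that pin every image reference to the build under test before kubectl apply -f - consumes it. The essential commands, as they appear expanded in the trace (only a representative subset of the sed chain is shown, with paths shortened):

    # CRDs: server-side apply, forcing ownership of conflicting fields.
    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml

    # Image pinning: rewrite image fields in the cluster spec, then apply.
    cat e2e-tests/proxy-switch/conf/proxy-switch.yml \
        | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
        | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | kubectl apply -f -
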
+ cat /tmp/tmp.JDLHw0aiLb + rm /tmp/tmp.VDA1uaJGhH /tmp/tmp.JDLHw0aiLb + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy proxy-switch ++ local target_cluster=proxy-switch +++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.w4Qdg5FL0O ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gpj9uoJ9Rz +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.w4Qdg5FL0O +++ cat /tmp/tmp.gpj9uoJ9Rz +++ rm /tmp/tmp.w4Qdg5FL0O /tmp/tmp.gpj9uoJ9Rz +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo proxy-switch-haproxy ++ return + local proxy=proxy-switch-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-switch-7169 ++ mktemp + local LAST_OUT=/tmp/tmp.wPy8cGpwxU ++ mktemp + local LAST_ERR=/tmp/tmp.VjGdnqUME0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-switch-7169 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-switch-7169 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n proxy-switch-7169 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.wPy8cGpwxU + cat /tmp/tmp.VjGdnqUME0 error: no matching resources found + rm /tmp/tmp.wPy8cGpwxU /tmp/tmp.VjGdnqUME0 + return 1 + true + wait_for_running proxy-switch-haproxy 1 + local name=proxy-switch-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-0 480 + local pod=proxy-switch-haproxy-0 + local max_retry=480 + local ns= ++ echo proxy-switch-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/proxy-switch-haproxy-0 condition met waiting for pod/proxy-switch-haproxy-0 to become Ready.Ok + wait_for_running proxy-switch-pxc 3 + local name=proxy-switch-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-0 480 + local pod=proxy-switch-pxc-0 + local max_retry=480 + local ns= ++ 
echo proxy-switch-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-0 condition met waiting for pod/proxy-switch-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-1 480 + local pod=proxy-switch-pxc-1 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-1 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-1 condition met waiting for pod/proxy-switch-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-2 480 + local pod=proxy-switch-pxc-2 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-2 condition met waiting for pod/proxy-switch-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.OisC8XPeGF +++ mktemp ++ local LAST_ERR=/tmp/tmp.LZCxmNhiWU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OisC8XPeGF ++ cat /tmp/tmp.LZCxmNhiWU ++ rm /tmp/tmp.OisC8XPeGF /tmp/tmp.LZCxmNhiWU ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h proxy-switch-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h proxy-switch-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xgCzbBvh9z +++ mktemp ++ local LAST_ERR=/tmp/tmp.pslcm3SoPZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xgCzbBvh9z ++ cat /tmp/tmp.pslcm3SoPZ ++ rm /tmp/tmp.xgCzbBvh9z /tmp/tmp.pslcm3SoPZ ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h proxy-switch-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT 
myApp.myApp (id) VALUES (100500)' + local 'uri=-h proxy-switch-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tx7wIfynM4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7kQIwTh3BZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Tx7wIfynM4 ++ cat /tmp/tmp.7kQIwTh3BZ ++ rm /tmp/tmp.Tx7wIfynM4 /tmp/tmp.7kQIwTh3BZ ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-59944c5bbf-mqq25 ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tkgvCKOtaH +++ mktemp ++ local LAST_ERR=/tmp/tmp.yU4PgRoySF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tkgvCKOtaH ++ cat /tmp/tmp.yU4PgRoySF ++ rm /tmp/tmp.tkgvCKOtaH /tmp/tmp.yU4PgRoySF ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uJrTUA0yFm +++ mktemp ++ local LAST_ERR=/tmp/tmp.afPh60EeuM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uJrTUA0yFm ++ cat /tmp/tmp.afPh60EeuM ++ rm /tmp/tmp.uJrTUA0yFm /tmp/tmp.afPh60EeuM ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.czI4XZkU6E +++ mktemp ++ local LAST_ERR=/tmp/tmp.md0zC2s7SB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.czI4XZkU6E ++ cat /tmp/tmp.md0zC2s7SB ++ rm /tmp/tmp.czI4XZkU6E /tmp/tmp.md0zC2s7SB ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql ++ is_keyring_plugin_in_use proxy-switch ++ local cluster=proxy-switch ++ kubectl_bin exec -it proxy-switch-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ grep -E -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U9V4kX351x +++ mktemp ++ local LAST_ERR=/tmp/tmp.jqKmo8tpQu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it proxy-switch-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U9V4kX351x ++ cat /tmp/tmp.jqKmo8tpQu Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.U9V4kX351x /tmp/tmp.jqKmo8tpQu ++ return 0 + '[' '' ']' + desc 'check cluster is ready with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- check cluster is ready with HAProxy ----------------------------------------------------------------------------------- + wait_for_running proxy-switch-pxc 3 + local name=proxy-switch-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-0 480 + local pod=proxy-switch-pxc-0 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo proxy-switch-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-0 condition met waiting for pod/proxy-switch-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-1 480 + local pod=proxy-switch-pxc-1 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-1 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-1 condition met waiting for pod/proxy-switch-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-2 480 + local pod=proxy-switch-pxc-2 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-2 condition met waiting for pod/proxy-switch-pxc-2 to become Ready.Ok + wait_for_running proxy-switch-haproxy 3 + local name=proxy-switch-haproxy + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-0 480 + local pod=proxy-switch-haproxy-0 + local max_retry=480 + local ns= ++ echo proxy-switch-haproxy-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/proxy-switch-haproxy-0 condition met waiting for pod/proxy-switch-haproxy-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-1 480 + local pod=proxy-switch-haproxy-1 + local max_retry=480 + local ns= ++ echo 
proxy-switch-haproxy-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/proxy-switch-haproxy-1 condition met waiting for pod/proxy-switch-haproxy-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-2 480 + local pod=proxy-switch-haproxy-2 + local max_retry=480 + local ns= ++ echo proxy-switch-haproxy-2 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/proxy-switch-haproxy-2 condition met waiting for pod/proxy-switch-haproxy-2 to become Ready.Ok + wait_cluster_consistency proxy-switch 3 3 + local cluster_name=proxy-switch + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/proxy-switch to be ready' waiting for pxc/proxy-switch to be ready++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2TR2vuHXeN +++ mktemp ++ local LAST_ERR=/tmp/tmp.KgUvBMTzUe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2TR2vuHXeN ++ cat /tmp/tmp.KgUvBMTzUe ++ rm /tmp/tmp.2TR2vuHXeN /tmp/tmp.KgUvBMTzUe ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.03cuGbvwC0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IixLfgpWbf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.03cuGbvwC0 ++ cat /tmp/tmp.IixLfgpWbf ++ rm /tmp/tmp.03cuGbvwC0 /tmp/tmp.IixLfgpWbf ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine proxy-switch +++ local cluster_name=proxy-switch ++++ get_proxy proxy-switch ++++ local target_cluster=proxy-switch +++++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.C5TlrPtcl0 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.u6fuEGRkP9 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.C5TlrPtcl0 +++++ cat /tmp/tmp.u6fuEGRkP9 +++++ rm /tmp/tmp.C5TlrPtcl0 /tmp/tmp.u6fuEGRkP9 +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo proxy-switch-haproxy ++++ return +++ local cluster_proxy=proxy-switch-haproxy +++ echo haproxy ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WONGzKEQHZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.DTYe4j2hQm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WONGzKEQHZ ++ cat /tmp/tmp.DTYe4j2hQm ++ rm /tmp/tmp.WONGzKEQHZ /tmp/tmp.DTYe4j2hQm ++ return 0 + [[ 3 == \3 ]] + echo + desc 'write data and check connectivity through HAProxy' + set +o xtrace 
----------------------------------------------------------------------------------- write data and check connectivity through HAProxy ----------------------------------------------------------------------------------- + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ysRULo390f +++ mktemp ++ local LAST_ERR=/tmp/tmp.MgKZZ0di15 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ysRULo390f ++ cat /tmp/tmp.MgKZZ0di15 ++ rm /tmp/tmp.ysRULo390f /tmp/tmp.MgKZZ0di15 ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xEcs9b4N8G +++ mktemp ++ local LAST_ERR=/tmp/tmp.vsUqf8a5Pu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xEcs9b4N8G ++ cat /tmp/tmp.vsUqf8a5Pu ++ rm /tmp/tmp.xEcs9b4N8G /tmp/tmp.vsUqf8a5Pu ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace ERROR 1062 (23000) at line 1: Duplicate entry '100500' for key 'myApp.PRIMARY' command terminated with exit code 1 + sleep 10 + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-haproxy -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qo1j5Wm0Rz +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8LtWm5VCh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qo1j5Wm0Rz ++ cat /tmp/tmp.L8LtWm5VCh ++ rm /tmp/tmp.qo1j5Wm0Rz /tmp/tmp.L8LtWm5VCh ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + desc 'verify data exists on all PXC nodes' + set +o xtrace ----------------------------------------------------------------------------------- verify data exists on all PXC nodes ----------------------------------------------------------------------------------- + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.900UnEImi2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vUICV834GQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.900UnEImi2 ++ cat /tmp/tmp.vUICV834GQ ++ rm /tmp/tmp.900UnEImi2 /tmp/tmp.vUICV834GQ ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 
's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mvIv7zUghN +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z4RUWGFyM4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mvIv7zUghN ++ cat /tmp/tmp.Z4RUWGFyM4 ++ rm /tmp/tmp.mvIv7zUghN /tmp/tmp.Z4RUWGFyM4 ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wy9As2XsA0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qjACkRewSH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Wy9As2XsA0 ++ cat /tmp/tmp.qjACkRewSH ++ rm /tmp/tmp.Wy9As2XsA0 /tmp/tmp.qjACkRewSH ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-59944c5bbf-mqq25 + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + desc 'switch from HAProxy to ProxySQL' + set +o xtrace ----------------------------------------------------------------------------------- switch from HAProxy to ProxySQL ----------------------------------------------------------------------------------- + kubectl_bin patch pxc proxy-switch --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.Q9hXDbhLUp ++ mktemp + local LAST_ERR=/tmp/tmp.Vd13MwxmWw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc proxy-switch --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Q9hXDbhLUp perconaxtradbcluster.pxc.percona.com/proxy-switch patched + cat /tmp/tmp.Vd13MwxmWw + rm /tmp/tmp.Q9hXDbhLUp /tmp/tmp.Vd13MwxmWw + return 0 + desc 'wait for ProxySQL to be ready and HAProxy to be removed' + set +o xtrace ----------------------------------------------------------------------------------- wait for ProxySQL to be ready and HAProxy to be removed ----------------------------------------------------------------------------------- + wait_for_delete sts/proxy-switch-haproxy + local res=sts/proxy-switch-haproxy + echo -n 'waiting for sts/proxy-switch-haproxy to be deleted' waiting for sts/proxy-switch-haproxy to be deleted+ set +o xtrace Error from server (NotFound): statefulsets.apps "proxy-switch-haproxy" not found + wait_for_running proxy-switch-proxysql 3 + local name=proxy-switch-proxysql + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-proxysql-0 480 + local pod=proxy-switch-proxysql-0 + local max_retry=480 + local ns= ++ echo proxy-switch-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace pod/proxy-switch-proxysql-0 condition met waiting for pod/proxy-switch-proxysql-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-proxysql-1 480 + local pod=proxy-switch-proxysql-1 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-proxysql-1 + local container=proxysql + set +o xtrace pod/proxy-switch-proxysql-1 condition met waiting for pod/proxy-switch-proxysql-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-proxysql-2 480 + local pod=proxy-switch-proxysql-2 + local max_retry=480 + local ns= ++ echo proxy-switch-proxysql-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace pod/proxy-switch-proxysql-2 condition met waiting for pod/proxy-switch-proxysql-2 to become Ready.Ok + wait_cluster_consistency proxy-switch 3 3 + local cluster_name=proxy-switch + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + desc 'wait cluster consistency' + set +o 
xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/proxy-switch to be ready' waiting for pxc/proxy-switch to be ready++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4fCxtJ73H7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TPoosq6gAv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4fCxtJ73H7 ++ cat /tmp/tmp.TPoosq6gAv ++ rm /tmp/tmp.4fCxtJ73H7 /tmp/tmp.TPoosq6gAv ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E73r06g6W3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jn1a2VFPMD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.E73r06g6W3 ++ cat /tmp/tmp.jn1a2VFPMD ++ rm /tmp/tmp.E73r06g6W3 /tmp/tmp.jn1a2VFPMD ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine proxy-switch +++ local cluster_name=proxy-switch ++++ get_proxy proxy-switch ++++ local target_cluster=proxy-switch +++++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.EGIOoA5BgK ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.uRPJ1f25Qa +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.EGIOoA5BgK +++++ cat /tmp/tmp.uRPJ1f25Qa +++++ rm /tmp/tmp.EGIOoA5BgK /tmp/tmp.uRPJ1f25Qa +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.otvb7oRtmY ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.w8Xs6UOJzU +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.otvb7oRtmY +++++ cat /tmp/tmp.w8Xs6UOJzU +++++ rm /tmp/tmp.otvb7oRtmY /tmp/tmp.w8Xs6UOJzU +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo proxy-switch-proxysql ++++ return +++ local cluster_proxy=proxy-switch-proxysql +++ echo proxysql ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4IoTtfN9aO +++ mktemp ++ local LAST_ERR=/tmp/tmp.cDaiaWAWqZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4IoTtfN9aO ++ cat /tmp/tmp.cDaiaWAWqZ ++ rm /tmp/tmp.4IoTtfN9aO /tmp/tmp.cDaiaWAWqZ ++ return 0 + [[ 3 == \3 ]] + echo
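
The trace above is the expanded form of the switch-over pattern used throughout this test: a single JSON patch flips spec.haproxy.enabled off and spec.proxysql.enabled on, then the suite polls until the HAProxy statefulset is gone and the CR status reports the expected member counts. A minimal standalone sketch of the same flow (cluster name and counts come from this run; the bare kubectl calls and sleep intervals are illustrative stand-ins for the suite's kubectl_bin retry wrapper and its own timeouts):

    # Flip the proxy layer on the PerconaXtraDBCluster CR (JSON Patch, not a merge patch).
    kubectl patch pxc proxy-switch --type=json -p '[
      {"op": "replace", "path": "/spec/haproxy/enabled", "value": false},
      {"op": "replace", "path": "/spec/proxysql/enabled", "value": true}
    ]'
    # Wait for the operator to delete the HAProxy statefulset...
    until ! kubectl get sts proxy-switch-haproxy >/dev/null 2>&1; do sleep 2; done
    # ...then for the CR to report a ready state with all 3 ProxySQL members up.
    until [[ "$(kubectl get pxc proxy-switch -o jsonpath='{.status.state}')" == "ready" ]]; do sleep 5; done
    until [[ "$(kubectl get pxc proxy-switch -o jsonpath='{.status.proxysql.ready}')" == "3" ]]; do sleep 5; done
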
+ desc 'verify HAProxy pods are deleted' + set +o xtrace ----------------------------------------------------------------------------------- verify HAProxy pods are deleted ----------------------------------------------------------------------------------- + kubectl_bin get pods -l app.kubernetes.io/name=percona-xtradb-cluster,app.kubernetes.io/instance=proxy-switch,app.kubernetes.io/component=haproxy + grep -q haproxy + desc 'verify ProxySQL service exists' + set +o xtrace ----------------------------------------------------------------------------------- verify ProxySQL service exists ----------------------------------------------------------------------------------- + kubectl_bin get service proxy-switch-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.FJy8l6oJF6 ++ mktemp + local LAST_ERR=/tmp/tmp.7Y29mNtXSd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get service proxy-switch-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FJy8l6oJF6 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE proxy-switch-proxysql ClusterIP 34.118.238.97 3306/TCP,33062/TCP,6070/TCP 68s + cat /tmp/tmp.7Y29mNtXSd + rm /tmp/tmp.FJy8l6oJF6 /tmp/tmp.7Y29mNtXSd + return 0 + desc 'check connectivity through ProxySQL after switch' + set +o xtrace ----------------------------------------------------------------------------------- check connectivity through ProxySQL after switch ----------------------------------------------------------------------------------- + sleep 20 + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h proxy-switch-proxysql -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-proxysql -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-proxysql -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e5skCq2zFG +++ mktemp ++ local LAST_ERR=/tmp/tmp.910KzJQoNT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.e5skCq2zFG ++ cat /tmp/tmp.910KzJQoNT ++ rm /tmp/tmp.e5skCq2zFG /tmp/tmp.910KzJQoNT ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!'
-s /tmp/tmp.nls9O08usK/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-1.sql /tmp/tmp.nls9O08usK/select-1.sql + desc 'write new data through ProxySQL' + set +o xtrace ----------------------------------------------------------------------------------- write new data through ProxySQL ----------------------------------------------------------------------------------- + run_mysql 'INSERT myApp.myApp (id) VALUES (100501)' '-h proxy-switch-proxysql -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100501)' + local 'uri=-h proxy-switch-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5gmqWMwsTF +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsnTEKY2p2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5gmqWMwsTF ++ cat /tmp/tmp.CsnTEKY2p2 ++ rm /tmp/tmp.5gmqWMwsTF /tmp/tmp.CsnTEKY2p2 ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 10 + compare_mysql_cmd select-2 'SELECT * from myApp.myApp;' '-h proxy-switch-proxysql -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-proxysql -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-proxysql -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.psTqmY29db +++ mktemp ++ local LAST_ERR=/tmp/tmp.UkeyMCxRR6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.psTqmY29db ++ cat /tmp/tmp.UkeyMCxRR6 ++ rm /tmp/tmp.psTqmY29db /tmp/tmp.UkeyMCxRR6 ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become Ready
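
compare_mysql_cmd, expanded repeatedly in this trace, resolves its golden file by image version before diffing: with an 8.0 image it would prefer a select-2-80.sql variant if one existed, and otherwise falls back to select-2.sql, so one test tree can serve 5.7, 8.0 and 8.4 images. A condensed sketch of that resolution together with the write-then-verify round trip above (COMPARE_DIR, IMAGE and the /tmp output path are illustrative placeholders; run_mysql is the suite's own helper, which runs the statement from the pxc-client pod):

    # Prefer a version-suffixed golden file when present (8.0 image in this run).
    expected="$COMPARE_DIR/select-2.sql"
    [[ $IMAGE =~ 8\.0 && -f "$COMPARE_DIR/select-2-80.sql" ]] && expected="$COMPARE_DIR/select-2-80.sql"
    # Write through ProxySQL, give replication a moment, then diff actual vs golden.
    run_mysql 'INSERT myApp.myApp (id) VALUES (100501)' '-h proxy-switch-proxysql -uroot -proot_password'
    sleep 10
    run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-proxysql -uroot -proot_password' >/tmp/select-2.sql
    diff -u "$expected" /tmp/select-2.sql
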
container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nls9O08usK/select-2.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql /tmp/tmp.nls9O08usK/select-2.sql + desc 'verify new data exists on all PXC nodes' + set +o xtrace ----------------------------------------------------------------------------------- verify new data exists on all PXC nodes ----------------------------------------------------------------------------------- + compare_mysql_cmd select-2 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IBmQkdYYut +++ mktemp ++ local LAST_ERR=/tmp/tmp.gsMmcRc4j7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IBmQkdYYut ++ cat /tmp/tmp.gsMmcRc4j7 ++ rm /tmp/tmp.IBmQkdYYut /tmp/tmp.gsMmcRc4j7 ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-2.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql /tmp/tmp.nls9O08usK/select-2.sql + compare_mysql_cmd select-2 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o7F8tDWl4z +++ mktemp ++ local LAST_ERR=/tmp/tmp.mvj9cqsZTQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.o7F8tDWl4z ++ cat /tmp/tmp.mvj9cqsZTQ ++ rm /tmp/tmp.o7F8tDWl4z /tmp/tmp.mvj9cqsZTQ ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-2.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql /tmp/tmp.nls9O08usK/select-2.sql + compare_mysql_cmd select-2 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T7aGmZAoCv +++ mktemp ++ local LAST_ERR=/tmp/tmp.mIpEnHYuLo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.T7aGmZAoCv ++ cat /tmp/tmp.mIpEnHYuLo ++ rm /tmp/tmp.T7aGmZAoCv /tmp/tmp.mIpEnHYuLo ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nls9O08usK/select-2.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-2.sql /tmp/tmp.nls9O08usK/select-2.sql + desc 'verify ProxySQL is routing to primary' + set +o xtrace ----------------------------------------------------------------------------------- verify ProxySQL is routing to primary ----------------------------------------------------------------------------------- ++ get_proxy_primary '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' proxy-switch-proxysql-0 ++ local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' ++ local pod=proxy-switch-proxysql-0 +++ run_mysql_local 'SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='\''ONLINE'\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' proxy-switch-proxysql-0 proxysql +++ local 'command=SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='\''ONLINE'\'';' +++ local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' +++ local pod=proxy-switch-proxysql-0 +++ local container_name=proxysql +++ set +o xtrace ++ local ip=proxy-switch-pxc-0.proxy-switch-pxc.proxy-switch-7169.svc.cluster.local +++ wc -l +++ echo proxy-switch-pxc-0.proxy-switch-pxc.proxy-switch-7169.svc.cluster.local ++ '[' 1 '!=' 1 ']' ++ cut -d. 
-f1 ++ echo proxy-switch-pxc-0.proxy-switch-pxc.proxy-switch-7169.svc.cluster.local + initial_primary=proxy-switch-pxc-0 + '[' -z proxy-switch-pxc-0 ']' + echo 'ProxySQL is routing to primary: proxy-switch-pxc-0' ProxySQL is routing to primary: proxy-switch-pxc-0
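
The primary check above queries ProxySQL's admin interface (port 6032 on the proxysql-0 pod) for the backend that is ONLINE in the writer hostgroup, then trims the service FQDN down to a pod name; hostgroup_id=11 is the writer hostgroup in this deployment. A roughly equivalent one-off check (a sketch using a plain kubectl exec where the suite goes through its run_mysql_local helper):

    # Ask ProxySQL which backend currently serves writes (writer hostgroup 11).
    kubectl exec proxy-switch-proxysql-0 -c proxysql -- \
      mysql -h127.0.0.1 -P6032 -uproxyadmin -padmin_password -NBe \
      "SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='ONLINE';" \
      | cut -d. -f1  # proxy-switch-pxc-0.proxy-switch-pxc.<ns>.svc.cluster.local -> proxy-switch-pxc-0
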
+ desc 'switch back to HAProxy from ProxySQL' + set +o xtrace ----------------------------------------------------------------------------------- switch back to HAProxy from ProxySQL ----------------------------------------------------------------------------------- + kubectl_bin patch pxc proxy-switch --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": true}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": false} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.VLsbUPFQzO ++ mktemp + local LAST_ERR=/tmp/tmp.c4O1q82uL0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc proxy-switch --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": true}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": false} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VLsbUPFQzO perconaxtradbcluster.pxc.percona.com/proxy-switch patched + cat /tmp/tmp.c4O1q82uL0 + rm /tmp/tmp.VLsbUPFQzO /tmp/tmp.c4O1q82uL0 + return 0 + desc 'wait for HAProxy to be ready and ProxySQL to be removed' + set +o xtrace ----------------------------------------------------------------------------------- wait for HAProxy to be ready and ProxySQL to be removed ----------------------------------------------------------------------------------- + wait_for_delete sts/proxy-switch-proxysql + local res=sts/proxy-switch-proxysql + echo -n 'waiting for sts/proxy-switch-proxysql to be deleted' waiting for sts/proxy-switch-proxysql to be deleted+ set +o xtrace Error from server (NotFound): statefulsets.apps "proxy-switch-proxysql" not found + wait_for_running proxy-switch-haproxy 3 + local name=proxy-switch-haproxy + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-0 480 + local pod=proxy-switch-haproxy-0 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-haproxy-0 + local container= + set +o xtrace pod/proxy-switch-haproxy-0 condition met waiting for pod/proxy-switch-haproxy-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-1 480 + local pod=proxy-switch-haproxy-1 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo proxy-switch-haproxy-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/proxy-switch-haproxy-1 condition met waiting for pod/proxy-switch-haproxy-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-2 480 + local pod=proxy-switch-haproxy-2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-haproxy-2 ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/proxy-switch-haproxy-2 condition met waiting for pod/proxy-switch-haproxy-2 to become Ready.Ok + wait_cluster_consistency proxy-switch 3 3 + local cluster_name=proxy-switch + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/proxy-switch to be ready' waiting for pxc/proxy-switch to be ready++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MCmlPEZlZg +++ mktemp ++ local LAST_ERR=/tmp/tmp.YN2bAXU3yZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MCmlPEZlZg ++ cat /tmp/tmp.YN2bAXU3yZ ++ rm /tmp/tmp.MCmlPEZlZg /tmp/tmp.YN2bAXU3yZ ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vtdxmLoEiN +++ mktemp ++ local LAST_ERR=/tmp/tmp.laeqE7xqEf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vtdxmLoEiN ++ cat /tmp/tmp.laeqE7xqEf ++ rm /tmp/tmp.vtdxmLoEiN /tmp/tmp.laeqE7xqEf ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine proxy-switch +++ local cluster_name=proxy-switch ++++ get_proxy proxy-switch ++++ local target_cluster=proxy-switch +++++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.b6KFYMTv6h ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.cyoWpY3Mnr +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.b6KFYMTv6h +++++ cat /tmp/tmp.cyoWpY3Mnr +++++ rm /tmp/tmp.b6KFYMTv6h /tmp/tmp.cyoWpY3Mnr +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo proxy-switch-haproxy ++++ return +++ local cluster_proxy=proxy-switch-haproxy +++ echo haproxy ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mGXBdR8QPt +++ mktemp ++ local LAST_ERR=/tmp/tmp.1lRAXewjYL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mGXBdR8QPt ++ cat /tmp/tmp.1lRAXewjYL ++ rm /tmp/tmp.mGXBdR8QPt /tmp/tmp.1lRAXewjYL ++ return 0 + [[ 3 == \3 ]] + echo + desc 'verify ProxySQL pods are deleted' + set +o xtrace ----------------------------------------------------------------------------------- verify ProxySQL pods are deleted ----------------------------------------------------------------------------------- + kubectl_bin get pods -l app.kubernetes.io/name=percona-xtradb-cluster,app.kubernetes.io/instance=proxy-switch,app.kubernetes.io/component=proxysql + grep -q proxysql + desc 'check cluster is ready with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- check cluster is ready with HAProxy ----------------------------------------------------------------------------------- + wait_for_running proxy-switch-pxc 3 + local name=proxy-switch-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace
----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-0 480 + local pod=proxy-switch-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo proxy-switch-pxc-0 + local container=pxc + set +o xtrace pod/proxy-switch-pxc-0 condition met waiting for pod/proxy-switch-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-1 480 + local pod=proxy-switch-pxc-1 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-1 condition met waiting for pod/proxy-switch-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-pxc-2 480 + local pod=proxy-switch-pxc-2 + local max_retry=480 + local ns= ++ echo proxy-switch-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/proxy-switch-pxc-2 condition met waiting for pod/proxy-switch-pxc-2 to become Ready.Ok + wait_for_running proxy-switch-haproxy 3 + local name=proxy-switch-haproxy + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-0 480 + local pod=proxy-switch-haproxy-0 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-haproxy-0 + local container= + set +o xtrace pod/proxy-switch-haproxy-0 condition met waiting for pod/proxy-switch-haproxy-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-1 480 + local pod=proxy-switch-haproxy-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-haproxy-1 ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/proxy-switch-haproxy-1 condition met waiting for pod/proxy-switch-haproxy-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod proxy-switch-haproxy-2 480 + local pod=proxy-switch-haproxy-2 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo proxy-switch-haproxy-2 + local container= + set +o xtrace pod/proxy-switch-haproxy-2 condition met waiting for pod/proxy-switch-haproxy-2 to become Ready.Ok + wait_cluster_consistency proxy-switch 3 3 + local cluster_name=proxy-switch + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/proxy-switch to be ready' waiting for pxc/proxy-switch to be ready++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WaC6h3sgAR +++ mktemp ++ local LAST_ERR=/tmp/tmp.4mRC2DpaMa ++ local exit_status=0 +++ seq 0 2 ++ for i in 
'$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WaC6h3sgAR ++ cat /tmp/tmp.4mRC2DpaMa ++ rm /tmp/tmp.WaC6h3sgAR /tmp/tmp.4mRC2DpaMa ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oxFfQKbNia +++ mktemp ++ local LAST_ERR=/tmp/tmp.qJue1GRho0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oxFfQKbNia ++ cat /tmp/tmp.qJue1GRho0 ++ rm /tmp/tmp.oxFfQKbNia /tmp/tmp.qJue1GRho0 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine proxy-switch +++ local cluster_name=proxy-switch ++++ get_proxy proxy-switch ++++ local target_cluster=proxy-switch +++++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qh65iT5jTt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.wok8h0hAK8 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc proxy-switch -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.qh65iT5jTt +++++ cat /tmp/tmp.wok8h0hAK8 +++++ rm /tmp/tmp.qh65iT5jTt /tmp/tmp.wok8h0hAK8 +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo proxy-switch-haproxy ++++ return +++ local cluster_proxy=proxy-switch-haproxy +++ echo haproxy ++ kubectl_bin get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MC7i3GC3Lz +++ mktemp ++ local LAST_ERR=/tmp/tmp.DKV7ov0FeK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc proxy-switch -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MC7i3GC3Lz ++ cat /tmp/tmp.DKV7ov0FeK ++ rm /tmp/tmp.MC7i3GC3Lz /tmp/tmp.DKV7ov0FeK ++ return 0 + [[ 3 == \3 ]] + echo + desc 'verify HAProxy service exists' + set +o xtrace ----------------------------------------------------------------------------------- verify HAProxy service exists ----------------------------------------------------------------------------------- + kubectl_bin get service proxy-switch-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.BHPpM8gXdd ++ mktemp + local LAST_ERR=/tmp/tmp.a8N7zQBiCh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get service proxy-switch-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BHPpM8gXdd NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE proxy-switch-haproxy ClusterIP 34.118.235.2 3306/TCP,3309/TCP,33062/TCP,33060/TCP,8404/TCP 2m7s + cat /tmp/tmp.a8N7zQBiCh + rm /tmp/tmp.BHPpM8gXdd /tmp/tmp.a8N7zQBiCh + return 0 + desc 'write data and check connectivity through HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- write data and check connectivity through HAProxy ----------------------------------------------------------------------------------- + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin 
get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7FtZjavZqm +++ mktemp ++ local LAST_ERR=/tmp/tmp.IxfFOxCqYB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7FtZjavZqm ++ cat /tmp/tmp.IxfFOxCqYB ++ rm /tmp/tmp.7FtZjavZqm /tmp/tmp.IxfFOxCqYB ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100502)' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100502)' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Af5rz5aJoa +++ mktemp ++ local LAST_ERR=/tmp/tmp.1k5VLbNw4A ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Af5rz5aJoa ++ cat /tmp/tmp.1k5VLbNw4A ++ rm /tmp/tmp.Af5rz5aJoa /tmp/tmp.1k5VLbNw4A ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 10 + compare_mysql_cmd select-3 'SELECT * from myApp.myApp;' '-h proxy-switch-haproxy -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-haproxy -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sITfl1gMAd +++ mktemp ++ local LAST_ERR=/tmp/tmp.q2IQskH79O ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break 
++ cat /tmp/tmp.sITfl1gMAd ++ cat /tmp/tmp.q2IQskH79O ++ rm /tmp/tmp.sITfl1gMAd /tmp/tmp.q2IQskH79O ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nls9O08usK/select-3.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql /tmp/tmp.nls9O08usK/select-3.sql + desc 'verify data exists on all PXC nodes' + set +o xtrace ----------------------------------------------------------------------------------- verify data exists on all PXC nodes ----------------------------------------------------------------------------------- + compare_mysql_cmd select-3 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-0.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WNTsvhznq6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.11TfMrIMAr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WNTsvhznq6 ++ cat /tmp/tmp.11TfMrIMAr ++ rm /tmp/tmp.WNTsvhznq6 /tmp/tmp.11TfMrIMAr ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-3.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql /tmp/tmp.nls9O08usK/select-3.sql + compare_mysql_cmd select-3 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-1.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rcMsQ771RT +++ mktemp ++ local LAST_ERR=/tmp/tmp.xlUZLukZiH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rcMsQ771RT ++ cat /tmp/tmp.xlUZLukZiH ++ rm /tmp/tmp.rcMsQ771RT /tmp/tmp.xlUZLukZiH ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-3.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql /tmp/tmp.nls9O08usK/select-3.sql + compare_mysql_cmd select-3 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h proxy-switch-pxc-2.proxy-switch-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SGblvF6xVy +++ mktemp ++ local LAST_ERR=/tmp/tmp.X5J2FJgvSD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SGblvF6xVy ++ cat /tmp/tmp.X5J2FJgvSD ++ rm /tmp/tmp.SGblvF6xVy /tmp/tmp.X5J2FJgvSD ++ return 0 + client_pod=pxc-client-59944c5bbf-mqq25 + wait_pod pxc-client-59944c5bbf-mqq25 + local pod=pxc-client-59944c5bbf-mqq25 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-mqq25 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-mqq25 condition met waiting for pod/pxc-client-59944c5bbf-mqq25 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nls9O08usK/select-3.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2261/e2e-tests/proxy-switch/compare/select-3.sql /tmp/tmp.nls9O08usK/select-3.sql + desc 'clean up' + set +o xtrace ----------------------------------------------------------------------------------- clean up ----------------------------------------------------------------------------------- + destroy proxy-switch-7169 + local namespace=proxy-switch-7169 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v level=info ++ get_operator_pod + grep -v 'the object has been modified' ++ local label_prefix=app.kubernetes.io/ + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.nls9O08usK/operator.log +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.3iUlgBci0N +++ mktemp ++ local LAST_ERR=/tmp/tmp.eaOFdAjrn9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3iUlgBci0N ++ cat /tmp/tmp.eaOFdAjrn9 ++ rm /tmp/tmp.3iUlgBci0N /tmp/tmp.eaOFdAjrn9 ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-b5ccc7b5f-k55bf ++ mktemp + local LAST_OUT=/tmp/tmp.0BHCG0jv2z ++ mktemp + local LAST_ERR=/tmp/tmp.C1ASBYOzxX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-b5ccc7b5f-k55bf + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0BHCG0jv2z + cat /tmp/tmp.C1ASBYOzxX + rm /tmp/tmp.0BHCG0jv2z /tmp/tmp.C1ASBYOzxX + return 0 2025-11-30T18:32:10.099Z INFO setup Manager starting up {"gitCommit": "65dc3545d046a6615fce925139e8c34d7d10a02a", "gitBranch": "PR-2261-65dc3545", "buildTime": "2025-11-30T17:27:00Z", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} 2025-11-30T18:32:10.099Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.14-gke.1033000"} 2025-11-30T18:32:10.103Z INFO setup Registering Components. 2025-11-30T18:32:10.985Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-11-30T18:32:10.985Z INFO controller-runtime.metrics Starting metrics server 2025-11-30T18:32:10.985Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-11-30T18:32:10.985Z INFO controller-runtime.webhook Starting webhook server 2025-11-30T18:32:10.985Z INFO setup Starting the Cmd. 
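
The destroy step above pipes the operator log through a sanitizing pipeline before dumping the records that follow: info-level and known-noisy lines are filtered out, volatile "ts" fields and limits paths are stripped by sed, and sort -u collapses repeated reconcile messages. A condensed sketch of that pipeline (the output path is illustrative; the trace tees into the test's own temp directory):

    # Collect and sanitize the operator log, keeping a copy as a test artifact.
    op_pod=$(kubectl get pods -n pxc-operator \
        --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
        -o jsonpath='{.items[].metadata.name}')
    kubectl logs -n pxc-operator "$op_pod" \
      | grep -v level=info \
      | grep -v 'the object has been modified' \
      | grep -v 'get backup status: Job.batch' \
      | sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
      | sort -u \
      | tee /tmp/operator.log
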
2025-11-30T18:32:10.985Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-11-30T18:32:10.986Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-11-30T18:32:10.986Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-11-30T18:32:10.986Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-11-30T18:32:11.086Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2025-11-30T18:32:11.128Z DEBUG events percona-xtradb-cluster-operator-b5ccc7b5f-k55bf_5b7b0b3e-b7a4-45d2-8a5b-d4ffcff0cf4b became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"2a90a56f-a21b-4932-a2c5-3f00b5c6c5b1","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1764527531119423009"}, "reason": "LeaderElection"} 2025-11-30T18:32:11.128Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2025-11-30T18:32:11.128Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-11-30T18:32:11.128Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-11-30T18:32:11.129Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-11-30T18:32:11.129Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-11-30T18:32:11.229Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2025-11-30T18:32:11.229Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2025-11-30T18:32:11.229Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2025-11-30T18:32:11.229Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2025-11-30T18:32:11.230Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2025-11-30T18:32:11.230Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2025-11-30T18:32:49.452Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "version": 
"1.19.0"} 2025-11-30T18:32:51.410Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "auto-proxy-switch-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2025-11-30T18:32:51.534Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-30T18:32:51.570Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-30T18:32:51.618Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:32:51.673Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:32:51.750Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:32:52.483Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "9461e699-7d44-4773-8616-77cc6d1e6903", "object": "proxy-switch-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:32:53.978Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "19d390c7-1be8-42dc-a36b-62a1d43eccf9", "object": "proxy-switch-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 
2025-11-30T18:32:53.996Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "19d390c7-1be8-42dc-a36b-62a1d43eccf9", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"}
2025-11-30T18:34:05.465Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711", "user": "operator"}
2025-11-30T18:34:05.509Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711", "user": "monitor"}
2025-11-30T18:34:05.571Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711"}
2025-11-30T18:34:05.609Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711"}
2025-11-30T18:34:05.654Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711", "user": "xtrabackup"}
2025-11-30T18:34:05.706Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711"}
2025-11-30T18:34:05.735Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "63364e21-3d5d-43e1-9d7b-f08dd0837711", "user": "replication"}
2025-11-30T18:36:30.588Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "a3d0d900-27ad-40c4-b881-619dadd2d667", "user": "root"}
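The entries above show the operator setting the password-expiration policy and grants for the system accounts (operator, monitor, xtrabackup, replication, root). A sketch for spot-checking one of them, assuming the conventional internal-<cluster> secret layout and StatefulSet pod naming (both assumptions; adjust to your deployment):

    # Read the operator account's password from the internal secret (key name is an assumption).
    PASS=$(kubectl -n proxy-switch-7169 get secret internal-proxy-switch \
        -o jsonpath='{.data.operator}' | base64 -d)
    # Confirm the grants the log claims were applied to the monitor user.
    kubectl -n proxy-switch-7169 exec proxy-switch-pxc-0 -c pxc -- \
        mysql -uoperator -p"$PASS" -e "SHOW GRANTS FOR 'monitor'@'%';"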
2025-11-30T18:36:30.703Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "a3d0d900-27ad-40c4-b881-619dadd2d667", "new version": "8.0.43-34.1"}
2025-11-30T18:39:16.842Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "f5edcc81-2940-4aff-abaa-0465ccaad1bd", "object": "proxy-switch-proxysql", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"}
2025-11-30T18:39:17.140Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "f5edcc81-2940-4aff-abaa-0465ccaad1bd", "object": "proxy-switch-proxysql", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2025-11-30T18:39:17.274Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "f5edcc81-2940-4aff-abaa-0465ccaad1bd", "object": "proxy-switch-proxysql-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"}
2025-11-30T18:39:20.517Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "f5edcc81-2940-4aff-abaa-0465ccaad1bd", "err": "get primary pxc pod: failed to get proxy connection: dial tcp: lookup proxy-switch-proxysql-unready.proxy-switch-7169 on 34.118.224.10:53: no such host"}
2025-11-30T18:39:21.258Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "1aa3f7bb-caa2-4ce7-a4e1-d133b7bcce70", "object": "proxy-switch-proxysql", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"}
2025-11-30T18:39:25.396Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "1aa3f7bb-caa2-4ce7-a4e1-d133b7bcce70", "err": "get primary pxc pod: failed to get proxy connection: dial tcp: lookup proxy-switch-proxysql-unready.proxy-switch-7169 on 34.118.224.10:53: no such host"}
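The "no such host" errors above are the operator dialing the just-created proxy-switch-proxysql-unready Service before cluster DNS has published the name; a new Service typically takes a few seconds to become resolvable. A sketch for reproducing the same lookup, assuming a throwaway busybox pod is acceptable in the namespace:

    # Confirm the Service object itself exists...
    kubectl -n proxy-switch-7169 get svc proxy-switch-proxysql-unready
    # ...then attempt the lookup the operator performed.
    kubectl -n proxy-switch-7169 run dns-probe --rm -it --restart=Never \
        --image=busybox -- nslookup proxy-switch-proxysql-unready.proxy-switch-7169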
"48a727aa-cb3a-4a97-8035-ca2bdc3465d2", "err": "get primary pxc pod: failed to get proxy connection: dial tcp: lookup proxy-switch-proxysql-unready.proxy-switch-7169 on 34.118.224.10:53: no such host"} 2025-11-30T18:39:35.678Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "e389b625-6025-4f2a-ba84-810dabd64938", "err": "get primary pxc pod: not found"} 2025-11-30T18:39:35.919Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "294758b3-82aa-4afc-8196-955d775287b7", "err": "get primary pxc pod: not found"} 2025-11-30T18:39:40.907Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "48ad495d-8a57-4e8b-acfa-17194593f696", "err": "get primary pxc pod: not found"} 2025-11-30T18:39:46.161Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "1e853f43-73aa-4f04-83c9-7cd54bf11e12", "err": "get primary pxc pod: not found"} 2025-11-30T18:40:07.230Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "e2823ad1-d7ee-47a4-9bcf-f9c89391166c", "err": "get primary pxc pod: not found"} 2025-11-30T18:40:20.376Z ERROR sync users {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "3b121a2f-d9d0-4795-8672-3dd6bed7872c", "error": "exec syncusers: failed to execute command in pod: command terminated with exit code 1 / / ERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n", "errorVerbose": "exec syncusers: failed to execute command in pod: command terminated with exit code 1 / / ERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in 
ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\nERROR (line:515) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:975\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:855\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2025-11-30T18:40:27.882Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "8880d3ab-899e-445d-a9f6-31e74df1f52a"} 2025-11-30T18:40:32.896Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "e2f18fcf-b13b-4c10-a0bb-7da789ae005b"} 2025-11-30T18:40:38.692Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "d901f143-81c4-4d0b-8939-21fbcd72904b"} 2025-11-30T18:40:43.151Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "145a9ee3-7cfc-40e5-8ee1-75506a715734"} 2025-11-30T18:40:48.951Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5ddf863f-8ae9-4498-8ba7-f901e1dd2395"} 2025-11-30T18:40:54.173Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "77c826fc-fd5c-45d6-875d-d43b781317b9"} 2025-11-30T18:40:59.265Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5f722d99-777e-4d3f-8cff-ec85a05f4979"} 2025-11-30T18:41:04.489Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": 
{"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "e3a42123-a7a8-47f0-b685-55e620fb0796"} 2025-11-30T18:41:10.085Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "1230398f-3618-4db5-be9a-4e8d5f184208"} 2025-11-30T18:41:15.285Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "c462f4de-b951-4235-9df4-b418723217ed"} 2025-11-30T18:41:20.464Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "45023098-661a-4d1b-8807-2eb2322274f2"} 2025-11-30T18:41:26.884Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "d9c1777a-0dfe-48b5-ad22-9325fd380d73"} 2025-11-30T18:41:32.167Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "cfb46889-005a-4144-8b7b-a6826e5ebb5f"} 2025-11-30T18:41:37.602Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "fc38c01a-32a5-4e83-b544-364ef4374254"} 2025-11-30T18:41:37.872Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5ebe827b-8ca6-4364-ac7f-00e4cd5e88e7", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-30T18:41:37.969Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5ebe827b-8ca6-4364-ac7f-00e4cd5e88e7", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-30T18:41:38.069Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", 
"name": "proxy-switch", "reconcileID": "5ebe827b-8ca6-4364-ac7f-00e4cd5e88e7", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:41:38.169Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5ebe827b-8ca6-4364-ac7f-00e4cd5e88e7", "object": "proxy-switch-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-30T18:41:41.833Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "5ebe827b-8ca6-4364-ac7f-00e4cd5e88e7", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.235.2:3306: connect: connection refused"} 2025-11-30T18:41:42.691Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "dd638200-5a67-4659-9b85-d0755ceb471e", "object": "proxy-switch-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-11-30T18:41:45.719Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "dd638200-5a67-4659-9b85-d0755ceb471e", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.235.2:3306: connect: connection refused"} 2025-11-30T18:41:50.686Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "1645b5e1-e547-4c06-adb0-4bc677ce27ab", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.235.2:3306: connect: connection refused"} 2025-11-30T18:41:58.886Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"proxy-switch","namespace":"proxy-switch-7169"}, "namespace": "proxy-switch-7169", "name": "proxy-switch", "reconcileID": "aabb9bb3-d663-436d-9805-6c787e6dea63", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.235.2:3306: connect: connection refused"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:857 + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n proxy-switch-7169 proxy-switch --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/proxy-switch 
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n proxy-switch-7169 proxy-switch --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/proxy-switch patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.TrxRPzefrW
++ mktemp
+ local LAST_ERR=/tmp/tmp.06OVmNpO6w
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.TrxRPzefrW
perconaxtradbcluster.pxc.percona.com "proxy-switch" deleted from proxy-switch-7169 namespace
+ cat /tmp/tmp.06OVmNpO6w
+ rm /tmp/tmp.TrxRPzefrW /tmp/tmp.06OVmNpO6w
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.AFLw6NV28H
++ mktemp
+ local LAST_ERR=/tmp/tmp.wkWdTDKJuR
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.AFLw6NV28H
No resources found
+ cat /tmp/tmp.wkWdTDKJuR
+ rm /tmp/tmp.AFLw6NV28H /tmp/tmp.wkWdTDKJuR
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.ho0Se67Gdl
++ mktemp
+ local LAST_ERR=/tmp/tmp.gqApZee1Nn
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ho0Se67Gdl
No resources found
+ cat /tmp/tmp.gqApZee1Nn
+ rm /tmp/tmp.ho0Se67Gdl /tmp/tmp.gqApZee1Nn
+ return 0
+ kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
++ mktemp
+ local LAST_OUT=/tmp/tmp.zEBDPkxvzf
++ mktemp
+ local LAST_ERR=/tmp/tmp.i6wWNexBha
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.zEBDPkxvzf
validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted
+ cat /tmp/tmp.i6wWNexBha
+ rm /tmp/tmp.zEBDPkxvzf /tmp/tmp.i6wWNexBha
+ return 0
+ kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml
+ :
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ rm -rf /tmp/tmp.nls9O08usK
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
+ kubectl_bin delete --grace-period=0 --force=true namespace proxy-switch-7169
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.g6K5HZFrjJ
+ local LAST_OUT=/tmp/tmp.1NsnYIZSeF
++ mktemp
+ desc 'test passed'
+ set +o xtrace
+ local LAST_ERR=/tmp/tmp.12kiR0jTmw
+ local exit_status=0
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ seq 0 2
++ mktemp
+ for i in '$(seq 0 2)'
+ set +e
+ local LAST_ERR=/tmp/tmp.YqTB2TBMWr
+ local exit_status=0
+ kubectl delete --grace-period=0 --force=true namespace proxy-switch-7169
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pxc-operator
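The teardown above force-deletes both namespaces immediately, which is fine for CI cleanup. Outside of CI, a bounded graceful delete with a forced fallback is gentler on finalizers; a sketch:

    # Try a graceful, bounded delete first; force only if it times out.
    kubectl delete namespace proxy-switch-7169 --wait --timeout=120s \
        || kubectl delete namespace proxy-switch-7169 --grace-period=0 --force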