Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/logs/haproxy-8-0.log + main + create_infra haproxy-24220 + local ns=haproxy-24220 + '[' -n pxc-operator ']' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n haproxy-11946 haproxy --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/haproxy patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.1odq8hW2WW ++ mktemp + local LAST_ERR=/tmp/tmp.Y7hjLKAu87 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1odq8hW2WW perconaxtradbcluster.pxc.percona.com "haproxy" deleted + cat /tmp/tmp.Y7hjLKAu87 + rm /tmp/tmp.1odq8hW2WW /tmp/tmp.Y7hjLKAu87 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.kQTPB0DBwe ++ mktemp + local LAST_ERR=/tmp/tmp.1vzUt4GP0K + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kQTPB0DBwe No resources found + cat /tmp/tmp.1vzUt4GP0K + rm /tmp/tmp.kQTPB0DBwe /tmp/tmp.1vzUt4GP0K + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.695f9FaGu7 ++ mktemp + local LAST_ERR=/tmp/tmp.90ymHTJwcg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.695f9FaGu7 No resources found + cat /tmp/tmp.90ymHTJwcg + rm /tmp/tmp.695f9FaGu7 /tmp/tmp.90ymHTJwcg + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.4Wbi1jetZD + kubectl_bin get ns ++ mktemp + local LAST_ERR=/tmp/tmp.yLA0uYiS9k + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.pclvU0F6yi ++ mktemp + local LAST_ERR=/tmp/tmp.nS9jN059Kd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pclvU0F6yi + cat /tmp/tmp.nS9jN059Kd + rm /tmp/tmp.pclvU0F6yi /tmp/tmp.nS9jN059Kd + return 0 namespace "haproxy-11946" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4Wbi1jetZD namespace "pxc-operator" deleted + cat /tmp/tmp.yLA0uYiS9k + rm /tmp/tmp.4Wbi1jetZD /tmp/tmp.yLA0uYiS9k + return 0 + 
wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.5TWcJMThHN ++ mktemp + local LAST_ERR=/tmp/tmp.1uzavJ2rZl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5TWcJMThHN namespace/pxc-operator created + cat /tmp/tmp.1uzavJ2rZl + rm /tmp/tmp.5TWcJMThHN /tmp/tmp.1uzavJ2rZl + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.hW0drrWxVS +++ mktemp ++ local LAST_ERR=/tmp/tmp.bb1CJbXyBg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hW0drrWxVS ++ cat /tmp/tmp.bb1CJbXyBg ++ rm /tmp/tmp.hW0drrWxVS /tmp/tmp.bb1CJbXyBg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.SWn9Up9XG3 ++ mktemp + local LAST_ERR=/tmp/tmp.jzoPResQV6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SWn9Up9XG3 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8" modified. 
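Every kubectl call in this trace runs through the suite's kubectl_bin retry wrapper: stdout and stderr are captured into mktemp files (LAST_OUT/LAST_ERR), the command is attempted up to three times via seq 0 2, and the captured output is printed and the temp files removed before returning. The wrapper's source is not part of this log, so the following bash sketch is only a reconstruction from the traced statements; the retry back-off and the exact failure handling are assumptions.

    # Plausible reconstruction of the kubectl_bin helper seen throughout this trace.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep "$i"   # back-off is an assumption; the trace only ever shows "sleep 0"
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

In the trace, each attempt shows up as the set +e / exit_status / set -e / '[' ... '!=' 0 ']' sequence wrapped around the raw kubectl command, followed by cat and rm of the two temp files.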
+ cat /tmp/tmp.jzoPResQV6 + rm /tmp/tmp.SWn9Up9XG3 /tmp/tmp.jzoPResQV6 + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jBUnvREVJJ ++ mktemp + local LAST_ERR=/tmp/tmp.A3eoOdd8hY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jBUnvREVJJ customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.A3eoOdd8hY + rm /tmp/tmp.jBUnvREVJJ /tmp/tmp.A3eoOdd8hY + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.L85dDd9hE6 ++ mktemp + local LAST_ERR=/tmp/tmp.wNG94Cqkwx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.L85dDd9hE6 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.wNG94Cqkwx + rm /tmp/tmp.L85dDd9hE6 /tmp/tmp.wNG94Cqkwx + return 0 + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/cw-operator.yaml + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4^' ++ mktemp + local LAST_OUT=/tmp/tmp.NxYfGDDKWw ++ mktemp + local LAST_ERR=/tmp/tmp.ry01v0OC5l + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NxYfGDDKWw deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.ry01v0OC5l + rm /tmp/tmp.NxYfGDDKWw /tmp/tmp.ry01v0OC5l + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.OqDMqKYtaB +++ mktemp ++ local LAST_ERR=/tmp/tmp.tvyVdq8rrh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods 
--selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OqDMqKYtaB ++ cat /tmp/tmp.tvyVdq8rrh ++ rm /tmp/tmp.OqDMqKYtaB /tmp/tmp.tvyVdq8rrh ++ return 0 + wait_pod percona-xtradb-cluster-operator-58bddf54b8-qshcs 480 pxc-operator + local pod=percona-xtradb-cluster-operator-58bddf54b8-qshcs + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-58bddf54b8-qshcs ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace percona-xtradb-cluster-operator-58bddf54b8-qshcs.Ok + sleep 3 + create_namespace haproxy-24220 + local namespace=haproxy-24220 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces haproxy-24220' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces haproxy-24220 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace haproxy-24220 + awk '{print$1}' ++ mktemp + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.ftvv0MRR7y + local LAST_OUT=/tmp/tmp.Owd5KUuldw ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.oKnKODmK79 + local exit_status=0 + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.pbrOykcA0e + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-24220 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Owd5KUuldw + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.oKnKODmK79 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-24220 + rm /tmp/tmp.Owd5KUuldw /tmp/tmp.oKnKODmK79 + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-24220 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.ftvv0MRR7y + cat /tmp/tmp.pbrOykcA0e Error from server (NotFound): namespaces "haproxy-24220" not found + rm /tmp/tmp.ftvv0MRR7y /tmp/tmp.pbrOykcA0e + return 1 + : + wait_for_delete namespace/haproxy-24220 + local res=namespace/haproxy-24220 + echo -n 'namespace/haproxy-24220 - ' namespace/haproxy-24220 - + set +o xtrace Error from server (NotFound): namespaces "haproxy-24220" not found + desc 'create namespace haproxy-24220' + set +o xtrace ----------------------------------------------------------------------------------- create namespace haproxy-24220 ----------------------------------------------------------------------------------- + kubectl_bin create namespace haproxy-24220 ++ mktemp + local LAST_OUT=/tmp/tmp.Tu1aT6IEOF ++ mktemp + local LAST_ERR=/tmp/tmp.u7cHXM8sqR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace haproxy-24220 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Tu1aT6IEOF 
namespace/haproxy-24220 created + cat /tmp/tmp.u7cHXM8sqR + rm /tmp/tmp.Tu1aT6IEOF /tmp/tmp.u7cHXM8sqR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.wh5cLgucsJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7j5b5cX8dJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wh5cLgucsJ ++ cat /tmp/tmp.7j5b5cX8dJ ++ rm /tmp/tmp.wh5cLgucsJ /tmp/tmp.7j5b5cX8dJ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8 --namespace=haproxy-24220 ++ mktemp + local LAST_OUT=/tmp/tmp.f5fasDT6Jb ++ mktemp + local LAST_ERR=/tmp/tmp.4yG7QxL5Rk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8 --namespace=haproxy-24220 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.f5fasDT6Jb Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster8" modified. + cat /tmp/tmp.4yG7QxL5Rk + rm /tmp/tmp.f5fasDT6Jb /tmp/tmp.4yG7QxL5Rk + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BeXMvpoB72 ++ mktemp + local LAST_ERR=/tmp/tmp.1C76ljGsGS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BeXMvpoB72 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.1C76ljGsGS + rm /tmp/tmp.BeXMvpoB72 /tmp/tmp.1C76ljGsGS + return 0 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.19 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 
's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-24220~ + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.4RKDZoRA7C ++ mktemp + local LAST_ERR=/tmp/tmp.nAKgB1nlxY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4RKDZoRA7C runtimeclass.node.k8s.io/docker-rc unchanged + cat /tmp/tmp.nAKgB1nlxY Warning: node.k8s.io/v1beta1 RuntimeClass is deprecated in v1.22+, unavailable in v1.25+ + rm /tmp/tmp.4RKDZoRA7C /tmp/tmp.nAKgB1nlxY + return 0 + desc 'create first PXC cluster with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster with HAProxy ----------------------------------------------------------------------------------- + cluster=haproxy + spinup_pxc haproxy /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml 3 10 + local cluster=haproxy + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ablLgMidbl ++ mktemp + local LAST_ERR=/tmp/tmp.7nQSXIAEvy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ablLgMidbl secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.7nQSXIAEvy + rm /tmp/tmp.ablLgMidbl /tmp/tmp.7nQSXIAEvy + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-24220~ + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 
's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IDY7F2Kyvb ++ mktemp + local LAST_ERR=/tmp/tmp.5XlZ6UqYjx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IDY7F2Kyvb deployment.apps/pxc-client created + cat /tmp/tmp.5XlZ6UqYjx + rm /tmp/tmp.IDY7F2Kyvb /tmp/tmp.5XlZ6UqYjx + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-24220~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.zw2sjAGZqL ++ mktemp + local LAST_ERR=/tmp/tmp.7GI0T1Iahk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zw2sjAGZqL perconaxtradbcluster.pxc.percona.com/haproxy created + cat /tmp/tmp.7GI0T1Iahk + rm /tmp/tmp.zw2sjAGZqL /tmp/tmp.7GI0T1Iahk + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy haproxy ++ local target_cluster=haproxy +++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ 
mktemp +++ local LAST_OUT=/tmp/tmp.LCZhfw5Egc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JgUkkOIRxv +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.LCZhfw5Egc +++ cat /tmp/tmp.JgUkkOIRxv +++ rm /tmp/tmp.LCZhfw5Egc /tmp/tmp.JgUkkOIRxv +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo haproxy-haproxy ++ return + local proxy=haproxy-haproxy + wait_for_running haproxy-haproxy 1 + local name=haproxy-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-0 480 + local pod=haproxy-haproxy-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace haproxy-haproxy-0....................error: a container name must be specified for pod haproxy-haproxy-0, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo haproxy-pxc-1 + local container=pxc + set +o xtrace haproxy-pxc-1................Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-2................Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h haproxy-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADYMIl2y8O +++ mktemp ++ local LAST_ERR=/tmp/tmp.WekgJE6f6I ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 ']' ++ break ++ cat /tmp/tmp.ADYMIl2y8O ++ cat /tmp/tmp.WekgJE6f6I ++ rm /tmp/tmp.ADYMIl2y8O /tmp/tmp.WekgJE6f6I ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-5d749ff8b6-685bw + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h haproxy-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PejHnelltj +++ mktemp ++ local LAST_ERR=/tmp/tmp.sorzHIzoh4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PejHnelltj ++ cat /tmp/tmp.sorzHIzoh4 ++ rm /tmp/tmp.PejHnelltj /tmp/tmp.sorzHIzoh4 ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-685bw ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gw29EWOE1c +++ mktemp ++ local LAST_ERR=/tmp/tmp.5HaSX0FDjl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gw29EWOE1c ++ cat /tmp/tmp.5HaSX0FDjl ++ rm /tmp/tmp.gw29EWOE1c /tmp/tmp.5HaSX0FDjl ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-685bw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dkYJqJ0TLJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.dkYJqJ0TLJ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jyf0EpWHrG +++ mktemp ++ local LAST_ERR=/tmp/tmp.C6ufR2jmY1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Jyf0EpWHrG ++ cat /tmp/tmp.C6ufR2jmY1 ++ rm /tmp/tmp.Jyf0EpWHrG /tmp/tmp.C6ufR2jmY1 ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-685bw ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dkYJqJ0TLJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.dkYJqJ0TLJ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F7QFoKhDxr +++ mktemp ++ local LAST_ERR=/tmp/tmp.deFaSb1xiT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.F7QFoKhDxr ++ cat /tmp/tmp.deFaSb1xiT ++ rm /tmp/tmp.F7QFoKhDxr /tmp/tmp.deFaSb1xiT ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-685bw + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dkYJqJ0TLJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.dkYJqJ0TLJ/select-1.sql ++ is_keyring_plugin_in_use haproxy ++ local cluster=haproxy ++ kubectl_bin exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QpByjBabTu +++ mktemp ++ local LAST_ERR=/tmp/tmp.G1mQDYWmKR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QpByjBabTu ++ cat /tmp/tmp.G1mQDYWmKR Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.QpByjBabTu /tmp/tmp.G1mQDYWmKR ++ return 0 + '[' '' ']' + desc 'checking all haproxy pods point to the same writer' + set +o xtrace ----------------------------------------------------------------------------------- checking all haproxy pods point to the same writer ----------------------------------------------------------------------------------- + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo haproxy-pxc-1 ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-2.Ok + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rfiOdljzCe +++ mktemp ++ local LAST_ERR=/tmp/tmp.nHsbt6eSF3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rfiOdljzCe ++ cat /tmp/tmp.nHsbt6eSF3 ++ rm /tmp/tmp.rfiOdljzCe /tmp/tmp.nHsbt6eSF3 ++ return 0 + local haproxy_pod_ip=10.23.153.7 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.153.7 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.153.7 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XrDDkisg9g +++ mktemp ++ local LAST_ERR=/tmp/tmp.o4cixPDCpt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XrDDkisg9g ++ 
cat /tmp/tmp.o4cixPDCpt ++ rm /tmp/tmp.XrDDkisg9g /tmp/tmp.o4cixPDCpt ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-685bw ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yfl8wsJACw +++ mktemp ++ local LAST_ERR=/tmp/tmp.egn0CFRN4P ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.yfl8wsJACw ++ cat /tmp/tmp.egn0CFRN4P ++ rm /tmp/tmp.yfl8wsJACw /tmp/tmp.egn0CFRN4P ++ return 0 + local haproxy_pod_ip=10.23.152.13 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.152.13 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.152.13 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2OjrXLpnJ8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IwvPoLEolS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2OjrXLpnJ8 ++ cat /tmp/tmp.IwvPoLEolS ++ rm /tmp/tmp.2OjrXLpnJ8 /tmp/tmp.IwvPoLEolS ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-685bw ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lMtNIwoAA0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yRpiTQO0jY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lMtNIwoAA0 ++ cat /tmp/tmp.yRpiTQO0jY ++ rm /tmp/tmp.lMtNIwoAA0 /tmp/tmp.yRpiTQO0jY ++ return 0 + local haproxy_pod_ip=10.23.154.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.154.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.154.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKo1oprfQT +++ mktemp ++ local LAST_ERR=/tmp/tmp.B8P7hvHOwS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.eKo1oprfQT ++ cat /tmp/tmp.B8P7hvHOwS ++ rm /tmp/tmp.eKo1oprfQT /tmp/tmp.B8P7hvHOwS ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-5d749ff8b6-685bw ++ /usr/bin/sed -E 
's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.dkYJqJ0TLJ/server_id_0.sql //tmp/tmp.dkYJqJ0TLJ/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.dkYJqJ0TLJ/server_id_1.sql //tmp/tmp.dkYJqJ0TLJ/server_id_2.sql + desc 'delete active writer and checking all haproxy pods still point to the same writer' + set +o xtrace ----------------------------------------------------------------------------------- delete active writer and checking all haproxy pods still point to the same writer ----------------------------------------------------------------------------------- + kubectl_bin delete pod haproxy-pxc-0 ++ mktemp + local LAST_OUT=/tmp/tmp.8OY2vLPwFj ++ mktemp + local LAST_ERR=/tmp/tmp.ew6jEkprvI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod haproxy-pxc-0 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8OY2vLPwFj pod "haproxy-pxc-0" deleted + cat /tmp/tmp.ew6jEkprvI + rm /tmp/tmp.8OY2vLPwFj /tmp/tmp.ew6jEkprvI + return 0 + sleep 3 + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.xjBcQe0yhf ++ mktemp + local LAST_ERR=/tmp/tmp.gmJOFJ8sji + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xjBcQe0yhf NAME READY STATUS RESTARTS AGE haproxy-haproxy-0 3/3 Running 0 7m52s haproxy-haproxy-1 3/3 Running 0 6m37s haproxy-haproxy-2 3/3 Running 0 6m16s haproxy-pxc-0 0/1 Init:0/1 0 7s haproxy-pxc-1 1/1 Running 0 6m39s haproxy-pxc-2 1/1 Running 0 5m27s pxc-client-5d749ff8b6-685bw 1/1 Running 0 8m7s + cat /tmp/tmp.gmJOFJ8sji + rm /tmp/tmp.xjBcQe0yhf /tmp/tmp.gmJOFJ8sji + return 0 + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IFD69ozTNV +++ mktemp ++ local LAST_ERR=/tmp/tmp.k5Z3ECdSb0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IFD69ozTNV ++ cat /tmp/tmp.k5Z3ECdSb0 ++ rm /tmp/tmp.IFD69ozTNV /tmp/tmp.k5Z3ECdSb0 ++ return 0 + local haproxy_pod_ip=10.23.153.7 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.153.7 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.153.7 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.77qUXUSo6y +++ mktemp ++ local LAST_ERR=/tmp/tmp.f9xRh8DCcj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.77qUXUSo6y ++ cat /tmp/tmp.f9xRh8DCcj ++ rm /tmp/tmp.77qUXUSo6y /tmp/tmp.f9xRh8DCcj ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-685bw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.Txtq2JbYo9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SWkLQm4IYg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Txtq2JbYo9 ++ cat /tmp/tmp.SWkLQm4IYg ++ rm /tmp/tmp.Txtq2JbYo9 /tmp/tmp.SWkLQm4IYg ++ return 0 + local haproxy_pod_ip=10.23.152.13 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.152.13 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.152.13 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sqRpv55a9c +++ mktemp ++ local LAST_ERR=/tmp/tmp.dd0Z4Tilrj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sqRpv55a9c ++ cat /tmp/tmp.dd0Z4Tilrj ++ rm /tmp/tmp.sqRpv55a9c /tmp/tmp.dd0Z4Tilrj ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-5d749ff8b6-685bw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5GeVgvIMac +++ mktemp ++ local LAST_ERR=/tmp/tmp.48ouF7DC3k ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5GeVgvIMac ++ cat /tmp/tmp.48ouF7DC3k ++ rm /tmp/tmp.5GeVgvIMac /tmp/tmp.48ouF7DC3k ++ return 0 + local haproxy_pod_ip=10.23.154.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.23.154.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.23.154.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tkAsM36WGm +++ mktemp ++ local LAST_ERR=/tmp/tmp.o8kLE78csi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tkAsM36WGm ++ cat /tmp/tmp.o8kLE78csi ++ rm /tmp/tmp.tkAsM36WGm /tmp/tmp.o8kLE78csi ++ return 0 + client_pod=pxc-client-5d749ff8b6-685bw + wait_pod pxc-client-5d749ff8b6-685bw + local pod=pxc-client-5d749ff8b6-685bw + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-5d749ff8b6-685bw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-685bw.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.dkYJqJ0TLJ/server_id_0.sql //tmp/tmp.dkYJqJ0TLJ/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.dkYJqJ0TLJ/server_id_1.sql //tmp/tmp.dkYJqJ0TLJ/server_id_2.sql --- //tmp/tmp.dkYJqJ0TLJ/server_id_1.sql 2023-04-20 10:57:24.841668993 +0000 +++ //tmp/tmp.dkYJqJ0TLJ/server_id_2.sql 2023-04-20 10:57:41.853857506 +0000 @@ -1 +1 @@ -server_id 17032672 +server_id 17032670
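The writer-consistency check that this run ends on (check_haproxy_writer) can be read directly from the trace: for each of the three haproxy-haproxy pods it looks up the pod IP, runs SHOW VARIABLES LIKE 'server_id' through that HAProxy instance from the pxc-client pod, stores the result as server_id_<i>.sql, and then diffs consecutive files, since every HAProxy pod is expected to route writes to the same PXC member. A rough bash sketch reconstructed from the traced commands; run_mysql is a simplified stand-in for the suite's helper, and the shared temp directory layout is an assumption.

    # Simplified stand-in for the suite's run_mysql helper: execute SQL through the
    # pxc-client pod against the given connection parameters (exact invocation assumed).
    run_mysql() {
        local sql=$1 uri=$2 client_pod
        client_pod=$(kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}')
        echo "$sql" | kubectl exec -i "$client_pod" -- sh -c "mysql -sN $uri"
    }

    # Rough reconstruction of check_haproxy_writer from the traced commands.
    check_haproxy_writer() {
        local tmp_dir=${TMP_DIR:-/tmp/haproxy-writer-check}   # assumption: the suite reuses one shared mktemp -d directory
        mkdir -p "$tmp_dir"
        local i haproxy_pod_ip
        for i in $(seq 0 2); do
            haproxy_pod_ip=$(kubectl get pods "haproxy-haproxy-$i" -o 'jsonpath={.status.podIP}')
            # server_id is distinct per PXC pod here, so matching values mean the same writer.
            run_mysql "SHOW VARIABLES LIKE 'server_id'" "-h $haproxy_pod_ip -uroot -proot_password" \
                >"$tmp_dir/server_id_$i.sql"
        done
        # All three HAProxy pods must agree, so every consecutive pair of captures must be identical.
        for i in $(seq 0 1); do
            diff -u "$tmp_dir/server_id_$i.sql" "$tmp_dir/server_id_$((i + 1)).sql"
        done
    }

The unified diff that closes the log is the second of these comparisons detecting a mismatch: shortly after haproxy-pxc-0 is deleted, haproxy-haproxy-1 and haproxy-haproxy-2 report different server_id values (17032672 vs 17032670), meaning at that moment they are routing writes to different PXC members.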