++ echo 'Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/haproxy.log' Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/haproxy.log ++ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: No Auth Provider found for name "gcp" +++ grep '\-eks\-' +++ jq -r .serverVersion.gitVersion +++ kubectl version -o json ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' ++ KUBE_VERSION=1.20 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.8.1 ++ '[' v3 == v2 ']' + main + create_infra haproxy-26002 + local ns=haproxy-26002 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n haproxy-25389 haproxy --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/haproxy patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.e8ubLsWVpn ++ mktemp + local LAST_ERR=/tmp/tmp.Tc0ytv0FFO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.e8ubLsWVpn perconaxtradbcluster.pxc.percona.com "haproxy" deleted + cat /tmp/tmp.Tc0ytv0FFO + rm /tmp/tmp.e8ubLsWVpn /tmp/tmp.Tc0ytv0FFO + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.CaOy8QIKH1 ++ mktemp + local LAST_ERR=/tmp/tmp.Y4dBpsekNV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.CaOy8QIKH1 No resources found + cat /tmp/tmp.Y4dBpsekNV + rm /tmp/tmp.CaOy8QIKH1 /tmp/tmp.Y4dBpsekNV + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.OcpHdcBtW7 ++ mktemp + local LAST_ERR=/tmp/tmp.Scr3EfUNES + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.OcpHdcBtW7 No resources found + cat /tmp/tmp.Scr3EfUNES + rm /tmp/tmp.OcpHdcBtW7 /tmp/tmp.Scr3EfUNES + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + '[' '!' 
-z '' ']' + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.xibKYHcoJP ++ mktemp + local LAST_ERR=/tmp/tmp.LrQ9zLRDKv + local exit_status=0 ++ seq 0 2 ++ mktemp + local LAST_OUT=/tmp/tmp.LJenoAWSbk ++ mktemp + for i in '$(seq 0 2)' + kubectl get ns + local LAST_ERR=/tmp/tmp.nkeKgEfGeO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.xibKYHcoJP + cat /tmp/tmp.LrQ9zLRDKv + rm /tmp/tmp.xibKYHcoJP /tmp/tmp.LrQ9zLRDKv + return 0 namespace "haproxy-25389" deleted + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.LJenoAWSbk namespace "pxc-operator" deleted + cat /tmp/tmp.nkeKgEfGeO + rm /tmp/tmp.LJenoAWSbk /tmp/tmp.nkeKgEfGeO + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + set +o xtrace namespace/pxc-operator - Error from server (NotFound): namespaces "pxc-operator" not found + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FKBX6guHuI ++ mktemp + local LAST_ERR=/tmp/tmp.Jsec0oLDzC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.FKBX6guHuI namespace/pxc-operator created + cat /tmp/tmp.Jsec0oLDzC + rm /tmp/tmp.FKBX6guHuI /tmp/tmp.Jsec0oLDzC + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.r9EYCKMvXq +++ mktemp ++ local LAST_ERR=/tmp/tmp.IUzWNg03sR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.r9EYCKMvXq ++ cat /tmp/tmp.IUzWNg03sR ++ rm /tmp/tmp.r9EYCKMvXq /tmp/tmp.IUzWNg03sR ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.g1DkX1JrBz ++ mktemp + local LAST_ERR=/tmp/tmp.bB4ZL0po5D + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.g1DkX1JrBz Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic" modified. 
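
Every kubectl call in this trace goes through the kubectl_bin wrapper, which explains the recurring mktemp / LAST_OUT / LAST_ERR / seq 0 2 pattern above. A minimal sketch of what the trace implies, assuming the real helper in e2e-tests/functions follows this shape (the stderr redirection on replay is an assumption; the trace itself only shows a plain cat):

    kubectl_bin() {
        # Capture stdout and stderr of each attempt so they can be replayed
        # after the retry loop, exactly once, in order.
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            [[ $exit_status != 0 ]] && { sleep 0; continue; }  # trace shows "sleep 0", i.e. no back-off
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2  # assumption: errors replayed to stderr
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

A non-zero final status is surfaced to the caller, which the script sometimes deliberately ignores: note the bare "+ :" after the namespace delete that fails three times later in this log.
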
+ cat /tmp/tmp.bB4ZL0po5D + rm /tmp/tmp.g1DkX1JrBz /tmp/tmp.bB4ZL0po5D + return 0 + deploy_operator + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.GVft2IankX ++ mktemp + local LAST_ERR=/tmp/tmp.wswtEWfkys + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GVft2IankX customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbbackups.pxc.percona.com configured + cat /tmp/tmp.wswtEWfkys + rm /tmp/tmp.GVft2IankX /tmp/tmp.wswtEWfkys + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.qUtZwikoQd ++ mktemp + local LAST_ERR=/tmp/tmp.VAm3ONdVwo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.qUtZwikoQd clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.VAm3ONdVwo + rm /tmp/tmp.qUtZwikoQd /tmp/tmp.VAm3ONdVwo + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.MKg53Gnjnu ++ mktemp + local LAST_ERR=/tmp/tmp.a47meM1t1B + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MKg53Gnjnu deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.a47meM1t1B + rm /tmp/tmp.MKg53Gnjnu /tmp/tmp.a47meM1t1B + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.fLu0V9fOMU +++ mktemp ++ local LAST_ERR=/tmp/tmp.y9KnrxsFRD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.fLu0V9fOMU ++ cat /tmp/tmp.y9KnrxsFRD ++ rm /tmp/tmp.fLu0V9fOMU /tmp/tmp.y9KnrxsFRD ++ return 0 + wait_pod 
percona-xtradb-cluster-operator-5699d7755d-rzhv4 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5699d7755d-rzhv4 + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-5699d7755d-rzhv4 ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace percona-xtradb-cluster-operator-5699d7755d-rzhv4.Ok + sleep 3 + create_namespace haproxy-26002 + local namespace=haproxy-26002 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + '[' '!' -z '' ']' + kubectl_bin delete namespace haproxy-26002 ++ mktemp + xargs kubectl delete ns + awk '{print$1}' + local LAST_OUT=/tmp/tmp.2wmKhWjVFF + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.VrocucgVZF + local LAST_OUT=/tmp/tmp.k2rWqDIVZw + local exit_status=0 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + kubectl delete namespace haproxy-26002 + local LAST_ERR=/tmp/tmp.iBh9nxrk48 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace haproxy-26002 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.k2rWqDIVZw + cat /tmp/tmp.iBh9nxrk48 + rm /tmp/tmp.k2rWqDIVZw /tmp/tmp.iBh9nxrk48 + return 0 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace haproxy-26002 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.2wmKhWjVFF + cat /tmp/tmp.VrocucgVZF Error from server (NotFound): namespaces "haproxy-26002" not found + rm /tmp/tmp.2wmKhWjVFF /tmp/tmp.VrocucgVZF + return 1 + : + wait_for_delete namespace/haproxy-26002 + local res=namespace/haproxy-26002 + set +o xtrace namespace/haproxy-26002 - Error from server (NotFound): namespaces "haproxy-26002" not found + kubectl_bin create namespace haproxy-26002 ++ mktemp + local LAST_OUT=/tmp/tmp.xR0Zg1SfAT ++ mktemp + local LAST_ERR=/tmp/tmp.TZq8Zm3pWC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace haproxy-26002 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.xR0Zg1SfAT namespace/haproxy-26002 created + cat /tmp/tmp.TZq8Zm3pWC + rm /tmp/tmp.xR0Zg1SfAT /tmp/tmp.TZq8Zm3pWC + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XKsixbeAPq +++ mktemp ++ local LAST_ERR=/tmp/tmp.imoUvxSuZa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.XKsixbeAPq ++ cat /tmp/tmp.imoUvxSuZa ++ rm /tmp/tmp.XKsixbeAPq /tmp/tmp.imoUvxSuZa ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=haproxy-26002 ++ mktemp + local LAST_OUT=/tmp/tmp.mXzc9f6KRQ ++ mktemp + local LAST_ERR=/tmp/tmp.oHMmgYDeSx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=haproxy-26002 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.mXzc9f6KRQ Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic" modified. 
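
wait_pod, used above for the operator pod and later for every PXC, HAProxy, ProxySQL, and client pod, derives which container to watch from the pod name. A sketch reconstructed from the trace, with the polling mechanism assumed (this sketch checks the pod-level Ready condition; the real helper evidently probes the derived container directly, which is what produces the "a container name must be specified ... choose one of: [haproxy pxc-monit my-sidecar-1]" error for the three-container haproxy-haproxy-0 pod further down):

    wait_pod() {
        local pod=$1
        local max_retry=${2:-480}
        local ns=$3
        # haproxy-pxc-0 -> "pxc", haproxy-proxysql-1 -> "proxysql"; any other
        # pod name (operator, client, haproxy) yields an empty container name.
        local container
        container=$(echo "$pod" | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | egrep '^(pxc|proxysql)$')
        set +o xtrace
        echo -n "$pod"
        local retry=0
        until kubectl get pod "$pod" ${ns:+-n "$ns"} \
                -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null \
                | grep -q True; do
            echo -n .  # the dots printed while waiting, e.g. "haproxy-pxc-2......Ok"
            retry=$((retry + 1))
            [ "$retry" -ge "$max_retry" ] && return 1
            sleep 1
        done
        echo '.Ok'
    }
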
+ cat /tmp/tmp.oHMmgYDeSx + rm /tmp/tmp.mXzc9f6KRQ /tmp/tmp.oHMmgYDeSx + return 0 + apply_secrets + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.3OtM8QVc3u ++ mktemp + local LAST_ERR=/tmp/tmp.2n13ee6NSq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.3OtM8QVc3u secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.2n13ee6NSq + rm /tmp/tmp.3OtM8QVc3u /tmp/tmp.2n13ee6NSq + return 0 + version_gt 1.19 ++ echo '1.20 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/container-rc.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/container-rc.yaml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/container-rc.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.nE440bmhj8 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + local LAST_ERR=/tmp/tmp.qmkV3hvdx4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.nE440bmhj8 runtimeclass.node.k8s.io/docker-rc unchanged + cat /tmp/tmp.qmkV3hvdx4 + rm /tmp/tmp.nE440bmhj8 /tmp/tmp.qmkV3hvdx4 + return 0 + desc 'create first PXC cluster with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster with HAProxy ----------------------------------------------------------------------------------- + cluster=haproxy + spinup_pxc haproxy /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml 3 10 + local cluster=haproxy + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + desc 'create 
first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.LEBEZVQ96O ++ mktemp + local LAST_ERR=/tmp/tmp.XatzMyjsLO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.LEBEZVQ96O secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.XatzMyjsLO + rm /tmp/tmp.LEBEZVQ96O /tmp/tmp.XatzMyjsLO + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.8ijXPTRgnR + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_ERR=/tmp/tmp.SbjnGIinR4 + local exit_status=0 + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.8ijXPTRgnR deployment.apps/pxc-client created + cat /tmp/tmp.SbjnGIinR4 + rm /tmp/tmp.8ijXPTRgnR /tmp/tmp.SbjnGIinR4 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: 
perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.RumvoQMhki + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' ++ mktemp + local LAST_ERR=/tmp/tmp.zs21prsfW4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.RumvoQMhki perconaxtradbcluster.pxc.percona.com/haproxy created + cat /tmp/tmp.zs21prsfW4 + rm /tmp/tmp.RumvoQMhki /tmp/tmp.zs21prsfW4 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy haproxy ++ local target_cluster=haproxy +++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Z1z9XZFYAm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O7RVXqY2Yd +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.Z1z9XZFYAm +++ cat /tmp/tmp.O7RVXqY2Yd +++ rm /tmp/tmp.Z1z9XZFYAm /tmp/tmp.O7RVXqY2Yd +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo haproxy-haproxy ++ return + local proxy=haproxy-haproxy + wait_for_running haproxy-haproxy 1 + local name=haproxy-haproxy + let last_pod=0 + : + local max_retry=480 ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-0 480 + local pod=haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-0......................................error: a container name must be specified for pod haproxy-haproxy-0, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo haproxy-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-1.....................Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-2......................................Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ 
perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h haproxy-haproxy -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h haproxy-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Myw5eCvuGa +++ mktemp ++ local LAST_ERR=/tmp/tmp.vLgLwcFGGb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.Myw5eCvuGa ++ cat /tmp/tmp.vLgLwcFGGb ++ rm /tmp/tmp.Myw5eCvuGa /tmp/tmp.vLgLwcFGGb ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h haproxy-haproxy -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h haproxy-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FKq6my25xk +++ mktemp ++ local LAST_ERR=/tmp/tmp.aUZyFbD1b2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.FKq6my25xk ++ cat /tmp/tmp.aUZyFbD1b2 ++ rm /tmp/tmp.FKq6my25xk /tmp/tmp.aUZyFbD1b2 ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-j68dc + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kHjATt5df0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mIUnMDYcUK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat 
/tmp/tmp.kHjATt5df0 ++ cat /tmp/tmp.mIUnMDYcUK ++ rm /tmp/tmp.kHjATt5df0 /tmp/tmp.mIUnMDYcUK ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.nTExTa49cV/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.nTExTa49cV/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jblMpHlghc +++ mktemp ++ local LAST_ERR=/tmp/tmp.9iJeQo22SN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.jblMpHlghc ++ cat /tmp/tmp.9iJeQo22SN ++ rm /tmp/tmp.jblMpHlghc /tmp/tmp.9iJeQo22SN ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nTExTa49cV/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.nTExTa49cV/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5EWcYdJeux +++ mktemp ++ local LAST_ERR=/tmp/tmp.dyjg2KfgRs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.5EWcYdJeux ++ cat /tmp/tmp.dyjg2KfgRs ++ rm /tmp/tmp.5EWcYdJeux /tmp/tmp.dyjg2KfgRs ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.nTExTa49cV/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.nTExTa49cV/select-1.sql ++ is_keyring_plugin_in_use haproxy ++ local cluster=haproxy ++ kubectl_bin exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uRUfpI8GG0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.faMZpr1HlZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.uRUfpI8GG0 ++ cat /tmp/tmp.faMZpr1HlZ Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.uRUfpI8GG0 /tmp/tmp.faMZpr1HlZ ++ return 0 + '[' '' ']' + desc 'checking all haproxy pods point to the same writer' + set +o xtrace ----------------------------------------------------------------------------------- checking all haproxy pods point to the same writer ----------------------------------------------------------------------------------- + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-2.Ok + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xiBefcmunx +++ mktemp ++ local LAST_ERR=/tmp/tmp.DHZcVa82JA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.xiBefcmunx ++ cat /tmp/tmp.DHZcVa82JA ++ rm /tmp/tmp.xiBefcmunx /tmp/tmp.DHZcVa82JA ++ return 0 + local haproxy_pod_ip=10.4.2.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.2.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.2.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qRIbwf4Hx3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jw5bl6Nrvm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qRIbwf4Hx3 ++ cat /tmp/tmp.Jw5bl6Nrvm ++ rm /tmp/tmp.qRIbwf4Hx3 /tmp/tmp.Jw5bl6Nrvm ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' 
++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pIRFLvR3Hh +++ mktemp ++ local LAST_ERR=/tmp/tmp.01TvoBx1FV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.pIRFLvR3Hh ++ cat /tmp/tmp.01TvoBx1FV ++ rm /tmp/tmp.pIRFLvR3Hh /tmp/tmp.01TvoBx1FV ++ return 0 + local haproxy_pod_ip=10.4.1.10 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.1.10 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.1.10 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bNdJBef3kz +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZDOaM0TqRW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.bNdJBef3kz ++ cat /tmp/tmp.ZDOaM0TqRW ++ rm /tmp/tmp.bNdJBef3kz /tmp/tmp.ZDOaM0TqRW ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3eQFWcdgpA +++ mktemp ++ local LAST_ERR=/tmp/tmp.EElHtP6xsb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.3eQFWcdgpA ++ cat /tmp/tmp.EElHtP6xsb ++ rm /tmp/tmp.3eQFWcdgpA /tmp/tmp.EElHtP6xsb ++ return 0 + local haproxy_pod_ip=10.4.0.15 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.0.15 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.0.15 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pk6I0yuOzL +++ mktemp ++ local LAST_ERR=/tmp/tmp.uiSaDr816W ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.pk6I0yuOzL ++ cat /tmp/tmp.uiSaDr816W ++ rm /tmp/tmp.pk6I0yuOzL /tmp/tmp.uiSaDr816W ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.nTExTa49cV/server_id_0.sql //tmp/tmp.nTExTa49cV/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.nTExTa49cV/server_id_1.sql //tmp/tmp.nTExTa49cV/server_id_2.sql + desc 'delete active writer and checking all haproxy pods still point to the same writer' + 
set +o xtrace ----------------------------------------------------------------------------------- delete active writer and checking all haproxy pods still point to the same writer ----------------------------------------------------------------------------------- + kubectl_bin delete pod haproxy-pxc-0 ++ mktemp + local LAST_OUT=/tmp/tmp.gLqWr7P2ff ++ mktemp + local LAST_ERR=/tmp/tmp.BKiCCCAyRh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pod haproxy-pxc-0 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.gLqWr7P2ff pod "haproxy-pxc-0" deleted + cat /tmp/tmp.BKiCCCAyRh + rm /tmp/tmp.gLqWr7P2ff /tmp/tmp.BKiCCCAyRh + return 0 + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VOYDciXamh +++ mktemp ++ local LAST_ERR=/tmp/tmp.j0WVzdJf7g ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.VOYDciXamh ++ cat /tmp/tmp.j0WVzdJf7g ++ rm /tmp/tmp.VOYDciXamh /tmp/tmp.j0WVzdJf7g ++ return 0 + local haproxy_pod_ip=10.4.2.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.2.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.2.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ekKUHmhTdJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.AbWmCdOkmL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ekKUHmhTdJ ++ cat /tmp/tmp.AbWmCdOkmL ++ rm /tmp/tmp.ekKUHmhTdJ /tmp/tmp.AbWmCdOkmL ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.35D46KLjou +++ mktemp ++ local LAST_ERR=/tmp/tmp.sPyAQOnrg4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.35D46KLjou ++ cat /tmp/tmp.sPyAQOnrg4 ++ rm /tmp/tmp.35D46KLjou /tmp/tmp.sPyAQOnrg4 ++ return 0 + local haproxy_pod_ip=10.4.1.10 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.1.10 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.1.10 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EzUicq3gz7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mQQLKfZpPu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.EzUicq3gz7 ++ cat /tmp/tmp.mQQLKfZpPu ++ rm /tmp/tmp.EzUicq3gz7 /tmp/tmp.mQQLKfZpPu ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + 
local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BWkQA72wNB +++ mktemp ++ local LAST_ERR=/tmp/tmp.FCNUsujEJl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.BWkQA72wNB ++ cat /tmp/tmp.FCNUsujEJl ++ rm /tmp/tmp.BWkQA72wNB /tmp/tmp.FCNUsujEJl ++ return 0 + local haproxy_pod_ip=10.4.0.15 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.4.0.15 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.4.0.15 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5qoGiQPf9F +++ mktemp ++ local LAST_ERR=/tmp/tmp.vXKjTU63el ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.5qoGiQPf9F ++ cat /tmp/tmp.vXKjTU63el ++ rm /tmp/tmp.5qoGiQPf9F /tmp/tmp.vXKjTU63el ++ return 0 + client_pod=pxc-client-5d749ff8b6-j68dc + wait_pod pxc-client-5d749ff8b6-j68dc + local pod=pxc-client-5d749ff8b6-j68dc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-j68dc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-j68dc.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.nTExTa49cV/server_id_0.sql //tmp/tmp.nTExTa49cV/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.nTExTa49cV/server_id_1.sql //tmp/tmp.nTExTa49cV/server_id_2.sql + desc 'check advanced options are enabled in haproxy statefulset' + set +o xtrace ----------------------------------------------------------------------------------- check advanced options are enabled in haproxy statefulset ----------------------------------------------------------------------------------- + compare_kubectl pdb/haproxy-haproxy + local resource=pdb/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml + local new_result=/tmp/tmp.nTExTa49cV/pdb_haproxy-haproxy.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ bc -l ++ echo '1.20 >= 1.21' + '[' 0 -eq 1 ']' + return 1 + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - metadata.resourceVersion + yq d - spec.nodeName + yq d - '**."percona.com/*"' + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.clusterIPs' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.dataSource' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**.procMount' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.volumeName' + yq d - '**.healthCheckNodePort' + yq d - spec.volumeMode + yq d - '**.nodePort' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - status + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - metadata.deletionTimestamp + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.uid' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - spec.ipFamilyPolicy + yq d - 'metadata.ownerReferences.*.apiVersion' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - '**.creationTimestamp' + yq d - spec.ipFamilies + kubectl_bin get -o yaml pdb/haproxy-haproxy + yq d - '**.namespace' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.w6B0tfUZTi ++ mktemp + local LAST_ERR=/tmp/tmp.g68NerBHPQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pdb/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.w6B0tfUZTi + cat /tmp/tmp.g68NerBHPQ + rm /tmp/tmp.w6B0tfUZTi /tmp/tmp.g68NerBHPQ + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml /tmp/tmp.nTExTa49cV/pdb_haproxy-haproxy.yml + compare_kubectl statefulset/haproxy-haproxy + local resource=statefulset/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml + local new_result=/tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ bc -l ++ echo '1.20 >= 1.21' + '[' 0 -eq 1 ']' + return 1 + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - metadata.resourceVersion + yq d - '**.volumeName' + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - metadata.selfLink + yq d - spec.nodeName + yq d - metadata.deletionTimestamp + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."percona.com/*"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.uid' + yq d - '**.namespace' + yq d - '**.creationTimestamp' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.healthCheckNodePort' + yq d - '**.controller-uid' + yq d - spec.ipFamilies + yq d - 'metadata.ownerReferences.*.apiVersion' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - '**.imagePullSecrets' + yq d - '**.preemptionPolicy' + yq d - '**.enableServiceLinks' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - spec.ipFamilyPolicy + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - metadata.managedFields + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.nodePort' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + kubectl_bin get -o yaml statefulset/haproxy-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.8e1LvcvGoT ++ mktemp + local LAST_ERR=/tmp/tmp.oZd2avSQLk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.8e1LvcvGoT + cat /tmp/tmp.oZd2avSQLk + rm /tmp/tmp.8e1LvcvGoT /tmp/tmp.oZd2avSQLk + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml /tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + desc 'default haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- default haproxy-replicas service ----------------------------------------------------------------------------------- + test_default_replicas_service haproxy + kpatch_delete_field pxc haproxy /spec/haproxy/replicasServiceEnabled + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "remove", "path": 
"/spec/haproxy/replicasServiceEnabled"}]' ++ mktemp + local LAST_OUT=/tmp/tmp.KmDgRsVd9n ++ mktemp + local LAST_ERR=/tmp/tmp.L9OBEKHxxm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.KmDgRsVd9n + cat /tmp/tmp.L9OBEKHxxm The request is invalid + rm /tmp/tmp.KmDgRsVd9n /tmp/tmp.L9OBEKHxxm + return 1 + compare_kubectl service/haproxy-haproxy-replicas + local resource=service/haproxy-haproxy-replicas + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml + local new_result=/tmp/tmp.nTExTa49cV/service_haproxy-haproxy-replicas.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml service/haproxy-haproxy-replicas ++ mktemp + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + local LAST_OUT=/tmp/tmp.S2mgoFV6AS + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."percona.com/*"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.nodeName + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' ++ mktemp + local LAST_ERR=/tmp/tmp.XFcqXkncCH + local exit_status=0 + yq d - spec.volumeMode + yq d - '**.procMount' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.dataSource' + yq d - '**.(name==NAMESPACE)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.(name==suffix)' + yq d - '**.volumeName' + yq d - '**.nodePort' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.enableServiceLinks' + yq d - '**.imagePullSecrets' + yq d - status + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.preemptionPolicy' ++ seq 0 2 + /usr/bin/sed 's/name: kube-api-access-.*$/name: 
kube-api-access/' + yq d - metadata.resourceVersion + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + for i in '$(seq 0 2)' + kubectl get -o yaml service/haproxy-haproxy-replicas + yq d - '**.uid' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - '**.namespace' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.S2mgoFV6AS + cat /tmp/tmp.XFcqXkncCH + rm /tmp/tmp.S2mgoFV6AS /tmp/tmp.XFcqXkncCH + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml /tmp/tmp.nTExTa49cV/service_haproxy-haproxy-replicas.yml + desc 'disable haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- disable haproxy-replicas service ----------------------------------------------------------------------------------- + test_disable_replicas_service haproxy + kpatch_set_field pxc haproxy /spec/haproxy/replicasServiceEnabled false + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + local value=false + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.UhYnKmkSqE ++ mktemp + local LAST_ERR=/tmp/tmp.fmYLtZvPYw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": false}]' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.UhYnKmkSqE + cat /tmp/tmp.fmYLtZvPYw + rm /tmp/tmp.UhYnKmkSqE /tmp/tmp.fmYLtZvPYw + return 0 + sleep 1 + wait_for_delete svc/haproxy-haproxy-replicas + local res=svc/haproxy-haproxy-replicas + set +o xtrace svc/haproxy-haproxy-replicas - Error from server (NotFound): services "haproxy-haproxy-replicas" not found + grep -e 'not found$' + desc 'enable haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- enable haproxy-replicas service ----------------------------------------------------------------------------------- + test_enable_replicas_service haproxy + kpatch_set_field pxc haproxy /spec/haproxy/replicasServiceEnabled true + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + local value=true + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.TrYfYur4qx ++ mktemp + local LAST_ERR=/tmp/tmp.xFvjlgP2uz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": true}]' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.TrYfYur4qx + cat /tmp/tmp.xFvjlgP2uz + rm /tmp/tmp.TrYfYur4qx /tmp/tmp.xFvjlgP2uz + return 0 + sleep 1 + compare_kubectl service/haproxy-haproxy-replicas + local resource=service/haproxy-haproxy-replicas + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml + local new_result=/tmp/tmp.nTExTa49cV/service_haproxy-haproxy-replicas.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml service/haproxy-haproxy-replicas + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - spec.nodeName + yq d - spec.volumeMode + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.procMount' + yq d - '**.finalizers' + yq d - '**.storageClassName' + yq d - '**."percona.com/*"' + yq d - '**.enableServiceLinks' + yq d - '**.healthCheckNodePort' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.nodePort' + yq d - status + yq d - '**.imagePullSecrets' + yq d - '**.(name==NAMESPACE)' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.creationTimestamp' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - spec.ipFamilies + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.controller-uid' + yq d - spec.ipFamilyPolicy + yq d - '**.preemptionPolicy' ++ mktemp + local LAST_OUT=/tmp/tmp.nWxaJaD7Cb + yq d - metadata.managedFields ++ mktemp + local LAST_ERR=/tmp/tmp.SwSDM2KYSW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml service/haproxy-haproxy-replicas + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.nWxaJaD7Cb + cat /tmp/tmp.SwSDM2KYSW + rm /tmp/tmp.nWxaJaD7Cb /tmp/tmp.SwSDM2KYSW + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml /tmp/tmp.nTExTa49cV/service_haproxy-haproxy-replicas.yml + desc 'enable proxy-sql' + set +o xtrace ----------------------------------------------------------------------------------- enable proxy-sql ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy-proxysql.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy-proxysql.yml + kubectl_bin apply -f - ++ mktemp + cat 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy-proxysql.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_OUT=/tmp/tmp.MJ1sAISyz6 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + local LAST_ERR=/tmp/tmp.uzwG3kJL8s + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MJ1sAISyz6 perconaxtradbcluster.pxc.percona.com/haproxy configured + cat /tmp/tmp.uzwG3kJL8s + rm /tmp/tmp.MJ1sAISyz6 /tmp/tmp.uzwG3kJL8s + return 0 + wait_for_running haproxy-proxysql 2 + local name=haproxy-proxysql + let last_pod=1 + local max_retry=480 ++ seq 0 1 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-proxysql-0 480 + local pod=haproxy-proxysql-0 + local max_retry=480 + local ns= ++ echo haproxy-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace haproxy-proxysql-0.............Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-proxysql-1 480 + local pod=haproxy-proxysql-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo haproxy-proxysql-1 ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace haproxy-proxysql-1.......Ok + compare_kubectl statefulset/haproxy-proxysql + local resource=statefulset/haproxy-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql.yml + local new_result=/tmp/tmp.nTExTa49cV/statefulset_haproxy-proxysql.yml + '[' '!' 
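# Note: apply_config/cat_config above is the test's templating step: the raw
# conf/haproxy-proxysql.yml is streamed through a chain of sed substitutions
# that pin the apiVersion and every image:/initImage: line to the images under
# test, and the result is piped straight into kubectl. Condensed to two of the
# substitutions actually used in this run:
cat e2e-tests/haproxy/conf/haproxy-proxysql.yml \
  | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' \
  | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
  | kubectl apply -f -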
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + yq d - metadata.selfLink + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - metadata.deletionTimestamp + yq d - '**.dataSource' + kubectl_bin get -o yaml statefulset/haproxy-proxysql + yq d - metadata.managedFields + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.clusterIPs' + yq d - spec.nodeName + yq d - '**."percona.com/*"' + yq d - '**.finalizers' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.volumeName' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.clusterIP' + yq d - spec.volumeMode + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.nodePort' ++ mktemp + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - metadata.resourceVersion + yq d - '**.uid' + yq d - '**.enableServiceLinks' + yq d - status + yq d - '**.controller-uid' + yq d - '**.(name==NAMESPACE)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - '**.namespace' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - spec.ipFamilies + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - spec.ipFamilyPolicy + yq d - '**.creationTimestamp' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + local LAST_OUT=/tmp/tmp.mcGK6gtxKO ++ mktemp + local LAST_ERR=/tmp/tmp.n94TRM6NnM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/haproxy-proxysql + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.mcGK6gtxKO + cat /tmp/tmp.n94TRM6NnM + rm /tmp/tmp.mcGK6gtxKO /tmp/tmp.n94TRM6NnM + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql.yml /tmp/tmp.nTExTa49cV/statefulset_haproxy-proxysql.yml + compare_kubectl service/haproxy-proxysql + local resource=service/haproxy-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-proxysql.yml + local new_result=/tmp/tmp.nTExTa49cV/service_haproxy-proxysql.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-proxysql-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-proxysql-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + yq d - metadata.managedFields + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - spec.nodeName + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - spec.volumeMode + yq d - '**.dataSource' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.clusterIP' + yq d - '**."percona.com/*"' + yq d - '**.clusterIPs' + yq d - '**.storageClassName' + yq d - '**.procMount' + yq d - '**.enableServiceLinks' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**.(name==suffix)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.nodePort' + yq d - status + yq d - '**.imagePullSecrets' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - metadata.deletionTimestamp + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.uid' + yq d - '**.namespace' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + yq d - '**.creationTimestamp' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed s/haproxy-26002/namespace/g + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + kubectl_bin get -o yaml service/haproxy-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.RZQ9b9noZI ++ mktemp + local LAST_ERR=/tmp/tmp.EgzQnQJUnh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml service/haproxy-proxysql + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.RZQ9b9noZI + cat /tmp/tmp.EgzQnQJUnh + rm /tmp/tmp.RZQ9b9noZI /tmp/tmp.EgzQnQJUnh + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-proxysql.yml /tmp/tmp.nTExTa49cV/service_haproxy-proxysql.yml + compare_mysql_cmd_local ping_timeout_server 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 '' -cproxysql + local command_id=ping_timeout_server + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local postfix= + local container_name=-cproxysql + local 
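# Note: compare_kubectl diffs the live object against a golden file only after
# normalizing it: yq v3 "d" (delete) expressions strip every server-populated,
# run-specific field (uid, resourceVersion, clusterIP/clusterIPs, status,
# managedFields, timestamps, node ports, ...), and sed rewrites the ephemeral
# test namespace haproxy-26002 to the literal "namespace" so one expected YAML
# fits every run. The skeleton of the pipeline, heavily abbreviated:
kubectl get -o yaml service/haproxy-proxysql \
  | yq d - metadata.managedFields \
  | yq d - '**.clusterIP' \
  | yq d - status \
  | /usr/bin/sed s/haproxy-26002/namespace/g \
  > "$new_result"
diff -u "$expected_result" "$new_result"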
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server-80.sql ']' + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 -cproxysql + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name=-cproxysql + set +o xtrace + '[' '!' -s /tmp/tmp.nTExTa49cV/ping_timeout_server.sql ']' + sleep 20 + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name= + set +o xtrace + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server.sql /tmp/tmp.nTExTa49cV/ping_timeout_server.sql + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-proxysql.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-proxysql.yaml + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + local LAST_OUT=/tmp/tmp.5zTT9Ouoeb + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-proxysql.yaml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.Iuy9Do36Q3 + local exit_status=0 + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.5zTT9Ouoeb secret/haproxy-proxysql created + cat /tmp/tmp.Iuy9Do36Q3 + rm /tmp/tmp.5zTT9Ouoeb /tmp/tmp.Iuy9Do36Q3 + return 0 + wait_cluster_consistency haproxy 3 2 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t3hkv2cxhI +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.JWwNgscRoT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.t3hkv2cxhI ++ cat /tmp/tmp.JWwNgscRoT ++ rm /tmp/tmp.t3hkv2cxhI /tmp/tmp.JWwNgscRoT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.id2IHeTF3v +++ mktemp ++ local LAST_ERR=/tmp/tmp.ogQJAM6SdD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.id2IHeTF3v ++ cat /tmp/tmp.ogQJAM6SdD ++ rm /tmp/tmp.id2IHeTF3v /tmp/tmp.ogQJAM6SdD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FOcHbiX060 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dUgsaDoM0Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.FOcHbiX060 ++ cat /tmp/tmp.dUgsaDoM0Y ++ rm /tmp/tmp.FOcHbiX060 /tmp/tmp.dUgsaDoM0Y ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZuTRrxWaRC +++ mktemp ++ local LAST_ERR=/tmp/tmp.sf5EJVZx8l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ZuTRrxWaRC ++ cat /tmp/tmp.sf5EJVZx8l ++ rm /tmp/tmp.ZuTRrxWaRC /tmp/tmp.sf5EJVZx8l ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.SIYJKQqjdn ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.1xcHSFfGmJ +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.SIYJKQqjdn +++++ cat /tmp/tmp.1xcHSFfGmJ +++++ rm /tmp/tmp.SIYJKQqjdn /tmp/tmp.1xcHSFfGmJ +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.HO3oe3mNi6 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.5lUahD7UEU +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.HO3oe3mNi6 +++++ cat /tmp/tmp.5lUahD7UEU +++++ rm /tmp/tmp.HO3oe3mNi6 /tmp/tmp.5lUahD7UEU +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-proxysql ++++ return +++ local cluster_proxy=haproxy-proxysql +++ echo proxysql ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2jQSLgo24v +++ mktemp ++ local LAST_ERR=/tmp/tmp.VoeKupRGWX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.2jQSLgo24v ++ cat /tmp/tmp.VoeKupRGWX ++ rm /tmp/tmp.2jQSLgo24v 
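# Note: wait_cluster_consistency is a plain polling loop over the custom
# resource's status subresource: it re-reads .status.state every 20s until the
# operator reports "ready", then asserts the ready replica counts (here 3 PXC
# pods and 2 ProxySQL pods). Reduced to its essence:
until [ "$(kubectl get pxc haproxy -o 'jsonpath={.status.state}')" = "ready" ]; do
  echo 'waiting for cluster readiness'
  sleep 20
done
[ "$(kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}')" = "3" ]
[ "$(kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}')" = "2" ]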
/tmp/tmp.VoeKupRGWX ++ return 0 + [[ 2 == \2 ]] + compare_kubectl statefulset/haproxy-proxysql -secret + local resource=statefulset/haproxy-proxysql + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret.yml + local new_result=/tmp/tmp.nTExTa49cV/statefulset_haproxy-proxysql.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/haproxy-proxysql + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.clusterIP' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.clusterIPs' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.dataSource' + yq d - '**.volumeName' + yq d - '**.storageClassName' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.procMount' + yq d - spec.volumeMode + yq d - '**.finalizers' + yq d - spec.nodeName + yq d - '**."percona.com/*"' + yq d - '**.(name==suffix)' + yq d - '**.nodePort' + yq d - '**.(name==NAMESPACE)' + yq d - '**.imagePullSecrets' + yq d - '**.healthCheckNodePort' + yq d - status + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.enableServiceLinks' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - spec.ipFamilies + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.preemptionPolicy' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.creationTimestamp' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.rlpdy9WbVe ++ mktemp + local LAST_ERR=/tmp/tmp.AAte059c0Q + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/haproxy-proxysql + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.rlpdy9WbVe + cat /tmp/tmp.AAte059c0Q + rm /tmp/tmp.rlpdy9WbVe /tmp/tmp.AAte059c0Q + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret.yml /tmp/tmp.nTExTa49cV/statefulset_haproxy-proxysql.yml + compare_mysql_cmd_local ping_timeout_server 'SELECT * FROM global_variables WHERE variable_name = 
'\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 -secret -cproxysql + local command_id=ping_timeout_server + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local postfix=-secret + local container_name=-cproxysql + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server-secret.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server-secret-80.sql ']' + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 -cproxysql + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name=-cproxysql + set +o xtrace + '[' '!' -s /tmp/tmp.nTExTa49cV/ping_timeout_server.sql ']' + sleep 20 + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name= + set +o xtrace + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/ping_timeout_server-secret.sql /tmp/tmp.nTExTa49cV/ping_timeout_server.sql + wait_cluster_consistency haproxy 3 2 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNALsKyBy5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ciWdCIzqQs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.aNALsKyBy5 ++ cat /tmp/tmp.ciWdCIzqQs ++ rm /tmp/tmp.aNALsKyBy5 /tmp/tmp.ciWdCIzqQs ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BVUHP1JlFL +++ mktemp ++ local LAST_ERR=/tmp/tmp.wMwAVrGGdn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.BVUHP1JlFL ++ cat /tmp/tmp.wMwAVrGGdn ++ rm /tmp/tmp.BVUHP1JlFL /tmp/tmp.wMwAVrGGdn ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.nLNDGYfQ44 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.gzViyjGGVj +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.nLNDGYfQ44 +++++ cat /tmp/tmp.gzViyjGGVj +++++ rm 
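# Note: the multiply-escaped string above is an artifact of xtrace rendering
# plus two layers of shell quoting; once every layer unwraps, the query is just
#   SELECT * FROM global_variables WHERE variable_name = 'mysql-ping_timeout_server';
# executed against ProxySQL's admin interface (port 6032) inside the pod.
# run_mysql_local presumably boils down to something like:
kubectl exec haproxy-proxysql-0 -c proxysql -- \
  mysql -h127.0.0.1 -P6032 -uproxyadmin -padmin_password \
  -e "SELECT * FROM global_variables WHERE variable_name = 'mysql-ping_timeout_server'"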
/tmp/tmp.nLNDGYfQ44 /tmp/tmp.gzViyjGGVj +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LWXESmEDQy ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.p4imsabgtr +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.LWXESmEDQy +++++ cat /tmp/tmp.p4imsabgtr +++++ rm /tmp/tmp.LWXESmEDQy /tmp/tmp.p4imsabgtr +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-proxysql ++++ return +++ local cluster_proxy=haproxy-proxysql +++ echo proxysql ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GqHZgWG670 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7aCpCAucEC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.GqHZgWG670 ++ cat /tmp/tmp.7aCpCAucEC ++ rm /tmp/tmp.GqHZgWG670 /tmp/tmp.7aCpCAucEC ++ return 0 + [[ 2 == \2 ]] + desc 're-enable haproxy' + set +o xtrace ----------------------------------------------------------------------------------- re-enable haproxy ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rGOZT8eHzv + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/haproxy.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_ERR=/tmp/tmp.MFdDEhOgK9 + local exit_status=0 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.rGOZT8eHzv perconaxtradbcluster.pxc.percona.com/haproxy configured + cat /tmp/tmp.MFdDEhOgK9 + rm /tmp/tmp.rGOZT8eHzv /tmp/tmp.MFdDEhOgK9 + return 0 + wait_for_running haproxy-haproxy 3 + local name=haproxy-haproxy + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-0 480 + local pod=haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + 
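# Note: the "a container name must be specified" errors printed by the probes
# below are expected noise rather than failures. wait_pod guesses the container
# by sed-matching a -pxc-/-proxysql- suffix in the pod name; for
# haproxy-haproxy-N that guess comes up empty, its kubectl call apparently runs
# without -c, and these pods now carry three containers (haproxy, pxc-monit,
# my-sidecar-1), so kubectl demands an explicit choice. Addressing a container
# directly looks like:
kubectl logs haproxy-haproxy-0 -c haproxy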
set +o xtrace haproxy-haproxy-0.........error: a container name must be specified for pod haproxy-haproxy-0, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-1 480 + local pod=haproxy-haproxy-1 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-1.........error: a container name must be specified for pod haproxy-haproxy-1, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-2 480 + local pod=haproxy-haproxy-2 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-2......error: a container name must be specified for pod haproxy-haproxy-2, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + compare_kubectl statefulset/haproxy-haproxy + local resource=statefulset/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml + local new_result=/tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/haproxy-haproxy + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.volumeName' + yq d - spec.volumeMode + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - spec.nodeName + yq d - '**."percona.com/*"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.storageClassName' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.imagePullSecrets' + yq d - '**.creationTimestamp' + yq d - '**.enableServiceLinks' + yq d - '**.image' + yq d - status + yq d - '**.procMount' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.healthCheckNodePort' + yq d - '**.finalizers' + yq d - '**.nodePort' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.(name==suffix)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: 
policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - '**.creationTimestamp' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.AfwgGP8I08 ++ mktemp + local LAST_ERR=/tmp/tmp.L4TEyOj5dB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.AfwgGP8I08 + cat /tmp/tmp.L4TEyOj5dB + rm /tmp/tmp.AfwgGP8I08 /tmp/tmp.L4TEyOj5dB + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml /tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + compare_kubectl service/haproxy-haproxy + local resource=service/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy.yml + local new_result=/tmp/tmp.nTExTa49cV/service_haproxy-haproxy.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml service/haproxy-haproxy + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.volumeName' + yq d - '**."percona.com/*"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.healthCheckNodePort' + yq d - '**.dataSource' + yq d - '**.nodePort' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.imagePullSecrets' + yq d - '**.creationTimestamp' + yq d - '**.enableServiceLinks' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - '**.image' + yq d - '**.(name==suffix)' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.storageClassName' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.procMount' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.preemptionPolicy' + yq d - '**.creationTimestamp' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.eLcc5Mim3k ++ 
mktemp + local LAST_ERR=/tmp/tmp.ATtupRDKOU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml service/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.eLcc5Mim3k + cat /tmp/tmp.ATtupRDKOU + rm /tmp/tmp.eLcc5Mim3k /tmp/tmp.ATtupRDKOU + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/service_haproxy-haproxy.yml /tmp/tmp.nTExTa49cV/service_haproxy-haproxy.yml + compare_kubectl pdb/haproxy-haproxy + local resource=pdb/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml + local new_result=/tmp/tmp.nTExTa49cV/pdb_haproxy-haproxy.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml pdb/haproxy-haproxy + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - spec.nodeName + yq d - '**.finalizers' + yq d - '**."percona.com/*"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.imagePullSecrets' + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.creationTimestamp' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.(name==suffix)' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**.enableServiceLinks' + yq d - spec.volumeMode + yq d - '**.nodePort' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - '**.creationTimestamp' + yq d - '**.preemptionPolicy' + yq d - '**.controller-uid' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.HsUWn4WHyf ++ mktemp + local LAST_ERR=/tmp/tmp.EQx3q8Pt9h + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pdb/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.HsUWn4WHyf + cat /tmp/tmp.EQx3q8Pt9h + rm /tmp/tmp.HsUWn4WHyf /tmp/tmp.EQx3q8Pt9h + return 0 + diff -u 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml /tmp/tmp.nTExTa49cV/pdb_haproxy-haproxy.yml + kubectl_bin exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + grep Maxconn: ++ mktemp + local LAST_OUT=/tmp/tmp.NEBisUrb7E ++ mktemp + local LAST_ERR=/tmp/tmp.t4pxtDKoOx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.NEBisUrb7E + cat /tmp/tmp.t4pxtDKoOx Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.NEBisUrb7E /tmp/tmp.t4pxtDKoOx + return 0 + diff --strip-trailing-cr /tmp/tmp.nTExTa49cV/haproxy_maxconn.txt /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/haproxy_maxconn.txt + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-haproxy.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-haproxy.yaml + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-26002~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + local LAST_OUT=/tmp/tmp.QbztDgt1aW + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/conf/config-secret-haproxy.yaml ++ mktemp + local LAST_ERR=/tmp/tmp.4Y1II5wpID + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.QbztDgt1aW secret/haproxy-haproxy created + cat /tmp/tmp.4Y1II5wpID + rm /tmp/tmp.QbztDgt1aW /tmp/tmp.4Y1II5wpID + return 0 + wait_cluster_consistency haproxy 3 3 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C1oW6IInt4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aS7VPZtvpd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.C1oW6IInt4 ++ cat /tmp/tmp.aS7VPZtvpd ++ rm /tmp/tmp.C1oW6IInt4 /tmp/tmp.aS7VPZtvpd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local 
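# Note: the Maxconn check above drives HAProxy's runtime API over its UNIX
# admin socket from inside the container; "show info" dumps process-level
# settings and grep picks out the Maxconn: line for comparison against the
# expected value. The "Unable to use a TTY" warning is harmless fallout of
# passing -it to kubectl exec with no terminal attached; dropping -t silences
# it:
kubectl exec haproxy-haproxy-0 -c haproxy -- bash -c \
  'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' \
  | grep Maxconn: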
LAST_OUT=/tmp/tmp.bNpmCUC4Jg +++ mktemp ++ local LAST_ERR=/tmp/tmp.6abzRzVa9t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.bNpmCUC4Jg ++ cat /tmp/tmp.6abzRzVa9t ++ rm /tmp/tmp.bNpmCUC4Jg /tmp/tmp.6abzRzVa9t ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3xJKdtx3Yr +++ mktemp ++ local LAST_ERR=/tmp/tmp.NB7it8P37X ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.3xJKdtx3Yr ++ cat /tmp/tmp.NB7it8P37X ++ rm /tmp/tmp.3xJKdtx3Yr /tmp/tmp.NB7it8P37X ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5ZZmiIp8eL +++ mktemp ++ local LAST_ERR=/tmp/tmp.KeztdmnTN1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.5ZZmiIp8eL ++ cat /tmp/tmp.KeztdmnTN1 ++ rm /tmp/tmp.5ZZmiIp8eL /tmp/tmp.KeztdmnTN1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BFglqhbtiR +++ mktemp ++ local LAST_ERR=/tmp/tmp.5HtVebOfBb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.BFglqhbtiR ++ cat /tmp/tmp.5HtVebOfBb ++ rm /tmp/tmp.BFglqhbtiR /tmp/tmp.5HtVebOfBb ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o0D7Xu7evs +++ mktemp ++ local LAST_ERR=/tmp/tmp.zqMAPWysVb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.o0D7Xu7evs ++ cat /tmp/tmp.zqMAPWysVb ++ rm /tmp/tmp.o0D7Xu7evs /tmp/tmp.zqMAPWysVb ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ZWjCoZfEAi ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ObrqrqZta6 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.ZWjCoZfEAi +++++ cat /tmp/tmp.ObrqrqZta6 +++++ rm /tmp/tmp.ZWjCoZfEAi /tmp/tmp.ObrqrqZta6 +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-haproxy ++++ return +++ local cluster_proxy=haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eMxuTC8yuV +++ mktemp ++ local LAST_ERR=/tmp/tmp.mnJrw4sNoR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat 
/tmp/tmp.eMxuTC8yuV ++ cat /tmp/tmp.mnJrw4sNoR ++ rm /tmp/tmp.eMxuTC8yuV /tmp/tmp.mnJrw4sNoR ++ return 0 + [[ 3 == \3 ]] + compare_kubectl statefulset/haproxy-haproxy -secret + local resource=statefulset/haproxy-haproxy + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret.yml + local new_result=/tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/haproxy-haproxy + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - spec.volumeMode + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.healthCheckNodePort' + yq d - '**.finalizers' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.volumeName' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.imagePullSecrets' + yq d - '**.nodePort' + yq d - '**.enableServiceLinks' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.controller-uid' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - status + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - spec.ipFamilies + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.creationTimestamp' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/haproxy-26002/namespace/g + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.GtO8K68fgd ++ mktemp + local LAST_ERR=/tmp/tmp.qrnADnmcxh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GtO8K68fgd + cat /tmp/tmp.qrnADnmcxh + rm /tmp/tmp.GtO8K68fgd /tmp/tmp.qrnADnmcxh + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret.yml /tmp/tmp.nTExTa49cV/statefulset_haproxy-haproxy.yml + kubectl_bin exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show 
info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + grep Maxconn: ++ mktemp + local LAST_OUT=/tmp/tmp.13RP4ZhjNy ++ mktemp + local LAST_ERR=/tmp/tmp.Kd9XmFEUzw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.13RP4ZhjNy + cat /tmp/tmp.Kd9XmFEUzw Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.13RP4ZhjNy /tmp/tmp.Kd9XmFEUzw + return 0 + diff --strip-trailing-cr /tmp/tmp.nTExTa49cV/haproxy_maxconn.txt /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/haproxy/compare/haproxy_maxconn-secret.txt + desc 'clean up' + set +o xtrace ----------------------------------------------------------------------------------- clean up ----------------------------------------------------------------------------------- + destroy haproxy-26002 + local namespace=haproxy-26002 + local ignore_logs=false + [[ false == \f\a\l\s\e ]] + grep -v 'get backup status: Job.batch' + grep -v 'the object has been modified' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.nTExTa49cV/operator.log ++ get_operator_pod + grep -v level=info ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.VklBcCARgG +++ mktemp ++ local LAST_ERR=/tmp/tmp.4P4Y8N9ud4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.VklBcCARgG ++ cat /tmp/tmp.4P4Y8N9ud4 ++ rm /tmp/tmp.VklBcCARgG /tmp/tmp.4P4Y8N9ud4 ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-rzhv4 ++ mktemp + local LAST_OUT=/tmp/tmp.nbQ3szB5Lk ++ mktemp + local LAST_ERR=/tmp/tmp.jj1vcY707m + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-rzhv4 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.nbQ3szB5Lk + cat /tmp/tmp.jj1vcY707m + rm /tmp/tmp.nbQ3szB5Lk /tmp/tmp.jj1vcY707m + return 0 I0412 10:53:49.622617 1 request.go:645] Throttling request took 1.045723998s, request: GET:https://10.7.240.1:443/apis/pxc.percona.com/v1-8-0?timeout=32s I0412 10:54:19.961298 1 request.go:645] Throttling request took 1.037237578s, request: GET:https://10.7.240.1:443/apis/metrics.k8s.io/v1beta1?timeout=32s {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: command terminated with exit code 1 / / ERROR (line:633) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n","errorVerbose":"exec syncusers: command terminated with exit code 1 / / ERROR (line:633) : The cluster (with writer hostgroup:11) has not been configured in 
ProxySQL\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: command terminated with exit code 1 / \nSyncing user accounts from PXC(haproxy-pxc-0.haproxy-pxc.haproxy-26002.svc.cluster.local:3306) to ProxySQL\nAdding user to ProxySQL: clustercheck\n Added query rule for user: clustercheck\nAdding user to ProxySQL: monitor\n Added query rule for user: monitor\nAdding user to ProxySQL: operator\n Added query rule for user: operator\nAdding user to ProxySQL: root\n / ERROR 1045 (28000) at line 1: ProxySQL Admin Error: UNIQUE constraint failed: mysql_users.username, mysql_users.frontend\nERROR (line:2037) : Failed to add the user (root) from PXC to ProxySQL database. \n-- Please check the ProxySQL connection parameters and status.\n","errorVerbose":"exec syncusers: command terminated with exit code 1 / \nSyncing user accounts from PXC(haproxy-pxc-0.haproxy-pxc.haproxy-26002.svc.cluster.local:3306) to ProxySQL\nAdding user to ProxySQL: clustercheck\n Added query rule for user: clustercheck\nAdding user to ProxySQL: monitor\n Added query rule for user: monitor\nAdding user to ProxySQL: operator\n Added query rule for user: operator\nAdding user to ProxySQL: root\n / ERROR 1045 (28000) at line 1: ProxySQL Admin Error: UNIQUE constraint failed: mysql_users.username, mysql_users.frontend\nERROR (line:2037) : Failed to add the user (root) from PXC to ProxySQL database. 
\n-- Please check the ProxySQL connection parameters and status.\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / ","errorVerbose":"exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"failed to connect to pod haproxy-pxc-0: dial tcp: lookup haproxy-pxc-0.haproxy-pxc.haproxy-26002 on 10.7.240.10:53: no such host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: failed to get proxySQL db: dial tcp 10.4.1.12:6032: connect: no route to host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: failed to get proxySQL db: dial tcp 10.7.251.129:3306: connect: connection refused"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: failed to get proxySQL db: dial tcp: lookup haproxy-proxysql-unready.haproxy-26002 on 10.7.240.10:53: no such host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: not found"} {"level":"info",,"caller":"pxc/version.go:328","msg":"update PXC version (fetched from db)","new version":"8.0.27-18.1"} {"level":"info",,"caller":"v1/pxc_types.go:1034","msg":"Sidecar container name cannot be haproxy. 
It's skipped"} {"level":"info",,"caller":"v1/pxc_types.go:874","msg":"ProxySQL size will be changed from 1 to 2 due to safe config"} {"level":"info",,"caller":"v1/pxc_types.go:875","msg":"Set allowUnsafeConfigurations=true to disable safe configuration"} {"level":"info",,"logger":"cmd","msg":"Git commit: 706f792ae47c369cb3556faff186b6873a8a247f Git branch: PR-1125-706f792a Build time: 2022-04-12T09:09:41Z"} {"level":"info",,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} {"level":"info",,"logger":"cmd","msg":"Go Version: go1.17.8"} {"level":"info",,"logger":"cmd","msg":"operator-sdk Version: v0.19.4"} {"level":"info",,"logger":"cmd","msg":"Registering Components."} {"level":"info",,"logger":"cmd","msg":"Runs on","platform":"kubernetes","version":"v1.20.15-gke.4100"} {"level":"info",,"logger":"cmd","msg":"Starting the Cmd."} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} {"level":"info",,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"registering webhook","path":"/validate-percona-xtradbcluster"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"serving webhook server","host":"","port":9443} {"level":"info",,"logger":"controller-runtime.webhook.webhooks","msg":"starting webhook server"} {"level":"info",,"logger":"leader","msg":"Became the leader."} {"level":"info",,"logger":"leader","msg":"No pre-existing lock was found."} {"level":"info",,"logger":"leader","msg":"Trying to become the leader."} [mysql] 2022/04/12 10:59:48 packets.go:36: read tcp 10.4.0.12:50508->10.7.243.131:3306: i/o timeout + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n haproxy-26002 haproxy --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/haproxy patched + kubectl_bin 
+ kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.BBos4gcmxX ++ mktemp + local LAST_ERR=/tmp/tmp.Ip7amA1iqp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.BBos4gcmxX perconaxtradbcluster.pxc.percona.com "haproxy" deleted + cat /tmp/tmp.Ip7amA1iqp + rm /tmp/tmp.BBos4gcmxX /tmp/tmp.Ip7amA1iqp + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.nq3JCM9zAP ++ mktemp + local LAST_ERR=/tmp/tmp.yAPqDvFeLg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.nq3JCM9zAP No resources found + cat /tmp/tmp.yAPqDvFeLg + rm /tmp/tmp.nq3JCM9zAP /tmp/tmp.yAPqDvFeLg + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.uvS0AYd3fH ++ mktemp + local LAST_ERR=/tmp/tmp.68gV0f8jtB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.uvS0AYd3fH No resources found + cat /tmp/tmp.68gV0f8jtB + rm /tmp/tmp.uvS0AYd3fH /tmp/tmp.68gV0f8jtB + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.vixg6PyVCW ++ mktemp + local LAST_ERR=/tmp/tmp.emKuEoi7gu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.vixg6PyVCW validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.emKuEoi7gu + rm /tmp/tmp.vixg6PyVCW /tmp/tmp.emKuEoi7gu + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace haproxy-26002 + rm -rf /tmp/tmp.nTExTa49cV + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jJzqeWG1vK ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.xFlOpXflEr + local LAST_ERR=/tmp/tmp.Pb2QFOGNIz + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.co3AnbQS43 + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace haproxy-26002 + kubectl delete --grace-period=0 --force=true namespace pxc-operator
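Teardown ends by force-deleting both test namespaces rather than waiting for graceful termination. A sketch of the same call with an explicit wait bolted on (the timeout value is illustrative):

  # Skip the grace period, then block until the namespace object is gone.
  kubectl delete namespace haproxy-26002 --grace-period=0 --force=true
  kubectl wait --for=delete namespace/haproxy-26002 --timeout=120s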