Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/logs/haproxy-5-7.log + main + create_infra haproxy-21703 + local ns=haproxy-21703 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n haproxy-18093 haproxy --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/haproxy patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.JA4jJOD5Hf ++ mktemp + local LAST_ERR=/tmp/tmp.PwHX7mYdBB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JA4jJOD5Hf perconaxtradbcluster.pxc.percona.com "haproxy" deleted + cat /tmp/tmp.PwHX7mYdBB + rm /tmp/tmp.JA4jJOD5Hf /tmp/tmp.PwHX7mYdBB + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.ozXn6F0no5 ++ mktemp + local LAST_ERR=/tmp/tmp.GkGnu61Avg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ozXn6F0no5 No resources found + cat /tmp/tmp.GkGnu61Avg + rm /tmp/tmp.ozXn6F0no5 /tmp/tmp.GkGnu61Avg + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.tssRiHwq9s ++ mktemp + local LAST_ERR=/tmp/tmp.gLYBIJ6C79 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tssRiHwq9s No resources found + cat /tmp/tmp.gLYBIJ6C79 + rm /tmp/tmp.tssRiHwq9s /tmp/tmp.gLYBIJ6C79 + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + awk '{print$1}' + kubectl_bin get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ZYXSZnDzAl + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.Y0dUNVCt9S ++ mktemp + local LAST_ERR=/tmp/tmp.Isb7CTFB82 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.PxN9TJCs0j + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Y0dUNVCt9S + cat /tmp/tmp.Isb7CTFB82 + rm /tmp/tmp.Y0dUNVCt9S /tmp/tmp.Isb7CTFB82 + return 0 namespace "haproxy-18093" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZYXSZnDzAl namespace "pxc-operator" deleted + cat /tmp/tmp.PxN9TJCs0j + rm /tmp/tmp.ZYXSZnDzAl /tmp/tmp.PxN9TJCs0j + return 0 + 
wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fkqKmAGxBU ++ mktemp + local LAST_ERR=/tmp/tmp.RHMMxuChEU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fkqKmAGxBU namespace/pxc-operator created + cat /tmp/tmp.RHMMxuChEU + rm /tmp/tmp.fkqKmAGxBU /tmp/tmp.RHMMxuChEU + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.PyrKeWAeYv +++ mktemp ++ local LAST_ERR=/tmp/tmp.FLgkeMKmOB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PyrKeWAeYv ++ cat /tmp/tmp.FLgkeMKmOB ++ rm /tmp/tmp.PyrKeWAeYv /tmp/tmp.FLgkeMKmOB ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.T9VU2lrMN0 ++ mktemp + local LAST_ERR=/tmp/tmp.mrwmAaaIFH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.T9VU2lrMN0 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7" modified. 
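Every kubectl invocation in this trace goes through the suite's kubectl_bin wrapper, whose behavior can be read straight off the xtrace output: stdout and stderr are captured into mktemp files, the command is attempted up to three times, and the captured streams are replayed into the log before the temp files are removed. A minimal sketch reconstructed from the trace alone (the real helper lives in the e2e-tests function library and may differ in details such as backoff):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)            # per-call stdout capture (the /tmp/tmp.XXXXXX names above)
    LAST_ERR=$(mktemp)            # per-call stderr capture
    for i in $(seq 0 2); do       # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status != 0 ]; then
            sleep 0               # no real backoff is visible in this run
            continue
        fi
        break
    done
    cat "$LAST_OUT"               # replay captured stdout into the log
    cat "$LAST_ERR"               # replay captured stderr into the log
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status           # non-zero propagates; the trace shows callers tolerating it with "|| :"
}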
+ cat /tmp/tmp.mrwmAaaIFH + rm /tmp/tmp.T9VU2lrMN0 /tmp/tmp.mrwmAaaIFH + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ZSU53GIrIJ ++ mktemp + local LAST_ERR=/tmp/tmp.LFTq5wvtoN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZSU53GIrIJ customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.LFTq5wvtoN + rm /tmp/tmp.ZSU53GIrIJ /tmp/tmp.LFTq5wvtoN + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.7dIgXpDkdE ++ mktemp + local LAST_ERR=/tmp/tmp.ewJgmPZKVd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7dIgXpDkdE clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.ewJgmPZKVd + rm /tmp/tmp.7dIgXpDkdE /tmp/tmp.ewJgmPZKVd + return 0 + kubectl_bin apply -f - ++ mktemp + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + local LAST_OUT=/tmp/tmp.nnYWLGRMSk ++ mktemp + local LAST_ERR=/tmp/tmp.a4Zz4NIJtb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4^' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nnYWLGRMSk deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.a4Zz4NIJtb + rm /tmp/tmp.nnYWLGRMSk /tmp/tmp.a4Zz4NIJtb + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.fjeUyTfSMv +++ mktemp ++ local LAST_ERR=/tmp/tmp.GHIB4ulp3d ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods 
--selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fjeUyTfSMv ++ cat /tmp/tmp.GHIB4ulp3d ++ rm /tmp/tmp.fjeUyTfSMv /tmp/tmp.GHIB4ulp3d ++ return 0 + wait_pod percona-xtradb-cluster-operator-58bddf54b8-pz69b 480 pxc-operator + local pod=percona-xtradb-cluster-operator-58bddf54b8-pz69b + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-58bddf54b8-pz69b ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace percona-xtradb-cluster-operator-58bddf54b8-pz69b.Ok + sleep 3 + create_namespace haproxy-21703 + local namespace=haproxy-21703 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces haproxy-21703' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces haproxy-21703 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace haproxy-21703 + xargs kubectl delete ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + awk '{print$1}' + kubectl_bin get ns + local LAST_OUT=/tmp/tmp.2EDlXyYZWs ++ mktemp + local LAST_OUT=/tmp/tmp.hichM7YdOH ++ mktemp + local LAST_ERR=/tmp/tmp.hrIfIgubbQ + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.5Oieua8oNA + local exit_status=0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-21703 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hichM7YdOH + cat /tmp/tmp.5Oieua8oNA + rm /tmp/tmp.hichM7YdOH /tmp/tmp.5Oieua8oNA + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-21703 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace haproxy-21703 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.2EDlXyYZWs + cat /tmp/tmp.hrIfIgubbQ Error from server (NotFound): namespaces "haproxy-21703" not found + rm /tmp/tmp.2EDlXyYZWs /tmp/tmp.hrIfIgubbQ + return 1 + : + wait_for_delete namespace/haproxy-21703 + local res=namespace/haproxy-21703 + echo -n 'namespace/haproxy-21703 - ' namespace/haproxy-21703 - + set +o xtrace Error from server (NotFound): namespaces "haproxy-21703" not found + desc 'create namespace haproxy-21703' + set +o xtrace ----------------------------------------------------------------------------------- create namespace haproxy-21703 ----------------------------------------------------------------------------------- + kubectl_bin create namespace haproxy-21703 ++ mktemp + local LAST_OUT=/tmp/tmp.daWFmJsbnr ++ mktemp + local LAST_ERR=/tmp/tmp.kRVhDjWCgN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace haproxy-21703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.daWFmJsbnr 
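The operator manifests are not applied verbatim: deploy_operator renders them through a sed/yq pipeline that pins the PR image, loosens the probe failureThreshold, and forces DISABLE_TELEMETRY on. Condensed from the trace (paths, the image tag, and the yq expression are copied from this run; the same render-then-apply technique reappears below as cat_config for the test manifests):

SRC=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/deploy

# CRDs go in with server-side apply so reruns do not fight over field ownership
kubectl apply --server-side --force-conflicts -f "$SRC/crd.yaml"

# cluster-wide RBAC: rewrite the namespace before applying
sed -e 's^namespace: .*^namespace: pxc-operator^' "$SRC/cw-rbac.yaml" \
    | kubectl apply -f -

# operator deployment: pin the image under test, relax probes, disable telemetry
cat "$SRC/cw-operator.yaml" \
    | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4^' \
    | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
    | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
        | select(.name == "percona-xtradb-cluster-operator").env[]
        | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
    | kubectl apply -f -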
namespace/haproxy-21703 created + cat /tmp/tmp.kRVhDjWCgN + rm /tmp/tmp.daWFmJsbnr /tmp/tmp.kRVhDjWCgN + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.i0dyC8vgBG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ugsV2KUKkD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.i0dyC8vgBG ++ cat /tmp/tmp.ugsV2KUKkD ++ rm /tmp/tmp.i0dyC8vgBG /tmp/tmp.ugsV2KUKkD ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7 --namespace=haproxy-21703 ++ mktemp + local LAST_OUT=/tmp/tmp.fjkT1pwbEt ++ mktemp + local LAST_ERR=/tmp/tmp.6OkIlzOzog + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7 --namespace=haproxy-21703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fjkT1pwbEt Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1131-d64e70d4-2-cluster7" modified. + cat /tmp/tmp.6OkIlzOzog + rm /tmp/tmp.fjkT1pwbEt /tmp/tmp.6OkIlzOzog + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.GkyR6UuCEB ++ mktemp + local LAST_ERR=/tmp/tmp.xAOdTNYfo3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GkyR6UuCEB secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.xAOdTNYfo3 + rm /tmp/tmp.GkyR6UuCEB /tmp/tmp.xAOdTNYfo3 + return 0 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.19 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/container-rc.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 
's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RWWeRBdwFf ++ mktemp + local LAST_ERR=/tmp/tmp.9fGYBsA2iC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RWWeRBdwFf runtimeclass.node.k8s.io/docker-rc unchanged + cat /tmp/tmp.9fGYBsA2iC Warning: node.k8s.io/v1beta1 RuntimeClass is deprecated in v1.22+, unavailable in v1.25+ + rm /tmp/tmp.RWWeRBdwFf /tmp/tmp.9fGYBsA2iC + return 0 + desc 'create first PXC cluster with HAProxy' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster with HAProxy ----------------------------------------------------------------------------------- + cluster=haproxy + spinup_pxc haproxy /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml 3 10 + local cluster=haproxy + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ujPjhNtGgJ ++ mktemp + local LAST_ERR=/tmp/tmp.9okibfOpTy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ujPjhNtGgJ secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.9okibfOpTy + rm /tmp/tmp.ujPjhNtGgJ /tmp/tmp.9okibfOpTy + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: 
perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/conf/client.yml + local LAST_OUT=/tmp/tmp.c2Jwti9lyU + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' ++ mktemp + local LAST_ERR=/tmp/tmp.9NcusHFQgq + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c2Jwti9lyU deployment.apps/pxc-client created + cat /tmp/tmp.9NcusHFQgq + rm /tmp/tmp.c2Jwti9lyU /tmp/tmp.9NcusHFQgq + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ haproxy == \d\e\m\a\n\d\-\b\a\c\k\u\p ]] + [[ haproxy == \d\e\m\a\n\d\-\b\a\c\k\u\p\-\c\l\o\u\d ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + local LAST_OUT=/tmp/tmp.sHJS9JUuqH + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.VJcaJkixwL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sHJS9JUuqH perconaxtradbcluster.pxc.percona.com/haproxy created + cat /tmp/tmp.VJcaJkixwL + rm /tmp/tmp.sHJS9JUuqH /tmp/tmp.VJcaJkixwL + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started 
----------------------------------------------------------------------------------- ++ get_proxy haproxy ++ local target_cluster=haproxy +++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jYNm7EEG92 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Vtv86rXunP +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.jYNm7EEG92 +++ cat /tmp/tmp.Vtv86rXunP +++ rm /tmp/tmp.jYNm7EEG92 /tmp/tmp.Vtv86rXunP +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo haproxy-haproxy ++ return + local proxy=haproxy-haproxy + wait_for_running haproxy-haproxy 1 + local name=haproxy-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-0 480 + local pod=haproxy-haproxy-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo haproxy-haproxy-0 + local container= + set +o xtrace haproxy-haproxy-0.....................error: a container name must be specified for pod haproxy-haproxy-0, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo haproxy-pxc-0 + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo haproxy-pxc-1 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-1...........Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-2...................Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] ++ is_keyring_plugin_in_use haproxy ++ local cluster=haproxy ++ kubectl_bin exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RuxapI4hOY +++ mktemp ++ local LAST_ERR=/tmp/tmp.wv7ceyTpI1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RuxapI4hOY ++ cat 
/tmp/tmp.wv7ceyTpI1 Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.RuxapI4hOY /tmp/tmp.wv7ceyTpI1 ++ return 0 + [[ -n '' ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h haproxy-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ai5TZTtjT3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KwTwqiuf7p ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ai5TZTtjT3 ++ cat /tmp/tmp.KwTwqiuf7p ++ rm /tmp/tmp.Ai5TZTtjT3 /tmp/tmp.KwTwqiuf7p ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h haproxy-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HLfZ2E8FeP +++ mktemp ++ local LAST_ERR=/tmp/tmp.4OeY778Xmg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.HLfZ2E8FeP ++ cat /tmp/tmp.4OeY778Xmg ++ rm /tmp/tmp.HLfZ2E8FeP /tmp/tmp.4OeY778Xmg ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-778b75bfb4-4gd4b + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-0.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P40dtcysIe +++ mktemp ++ local LAST_ERR=/tmp/tmp.PNlAzazdZl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods 
--selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.P40dtcysIe ++ cat /tmp/tmp.PNlAzazdZl ++ rm /tmp/tmp.P40dtcysIe /tmp/tmp.PNlAzazdZl ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.j6dTYMCuSE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.j6dTYMCuSE/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-1.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q13132mbnn +++ mktemp ++ local LAST_ERR=/tmp/tmp.QRSbD54lUv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.q13132mbnn ++ cat /tmp/tmp.QRSbD54lUv ++ rm /tmp/tmp.q13132mbnn /tmp/tmp.QRSbD54lUv ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j6dTYMCuSE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.j6dTYMCuSE/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h haproxy-pxc-2.haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CdY9BIE9jN +++ mktemp ++ local LAST_ERR=/tmp/tmp.EpxrE4Mwxb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CdY9BIE9jN ++ cat /tmp/tmp.EpxrE4Mwxb ++ rm /tmp/tmp.CdY9BIE9jN /tmp/tmp.EpxrE4Mwxb ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j6dTYMCuSE/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/select-1.sql /tmp/tmp.j6dTYMCuSE/select-1.sql ++ is_keyring_plugin_in_use haproxy ++ local cluster=haproxy ++ kubectl_bin exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6Jphg19uT6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qQbBwYrGiE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6Jphg19uT6 ++ cat /tmp/tmp.qQbBwYrGiE Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.6Jphg19uT6 /tmp/tmp.qQbBwYrGiE ++ return 0 + '[' '' ']' + desc 'checking all haproxy pods point to the same writer' + set +o xtrace ----------------------------------------------------------------------------------- checking all haproxy pods point to the same writer ----------------------------------------------------------------------------------- + wait_for_running haproxy-pxc 3 + local name=haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-0 480 + local pod=haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-1 480 + local pod=haproxy-pxc-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo haproxy-pxc-1 ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace haproxy-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-pxc-2 480 + local pod=haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace haproxy-pxc-2.Ok + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6wHkdfZCxH +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z89qKs7Ik0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6wHkdfZCxH ++ cat /tmp/tmp.Z89qKs7Ik0 ++ rm /tmp/tmp.6wHkdfZCxH /tmp/tmp.Z89qKs7Ik0 ++ return 0 + local haproxy_pod_ip=10.133.137.8 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.137.8 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.137.8 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Answddq9C +++ mktemp ++ local LAST_ERR=/tmp/tmp.8c97Lw6a6y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0Answddq9C 
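wait_pod derives which container to watch from the pod name itself: the sed/egrep pair seen throughout maps haproxy-pxc-0 to pxc and a proxysql pod name to proxysql, and yields an empty string for operator, client, and haproxy pods, which is why the log earlier shows "a container name must be specified for pod haproxy-haproxy-0" while that multi-container pod is being waited on. The polling loop itself runs behind set +o xtrace (only the dots and the final ".Ok" reach the log), so the loop below is a plausible shape, not the suite's actual code:

wait_pod() {
    local pod=$1
    local max_retry=${2:-480}
    local ns=$3
    # "...-pxc-0" -> "pxc", "...-proxysql-1" -> "proxysql", anything else -> ""
    local container
    container=$(echo "$pod" \
        | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' \
        | egrep '^(pxc|proxysql)$' || true)
    echo -n "$pod"
    local retry=0
    # hypothetical readiness poll; the real probe evidently addresses $container
    # directly (hence the "container name must be specified" error above), while
    # this sketch just watches the pod's Ready condition
    until kubectl get pod "$pod" ${ns:+-n "$ns"} \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' \
            2>/dev/null | grep -q True; do
        echo -n .
        retry=$((retry + 1))
        [ "$retry" -lt "$max_retry" ] || { echo "timeout waiting for $pod"; return 1; }
        sleep 1
    done
    echo .Ok
}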
++ cat /tmp/tmp.8c97Lw6a6y ++ rm /tmp/tmp.0Answddq9C /tmp/tmp.8c97Lw6a6y ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-778b75bfb4-4gd4b ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JVLWa5Tx36 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nQ5HMs8EIe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JVLWa5Tx36 ++ cat /tmp/tmp.nQ5HMs8EIe ++ rm /tmp/tmp.JVLWa5Tx36 /tmp/tmp.nQ5HMs8EIe ++ return 0 + local haproxy_pod_ip=10.133.136.12 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.136.12 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.136.12 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uh7VnwP8Bl +++ mktemp ++ local LAST_ERR=/tmp/tmp.XouvbmhMA5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Uh7VnwP8Bl ++ cat /tmp/tmp.XouvbmhMA5 ++ rm /tmp/tmp.Uh7VnwP8Bl /tmp/tmp.XouvbmhMA5 ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WIgLvKquTJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.GN1hHHMDiV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WIgLvKquTJ ++ cat /tmp/tmp.GN1hHHMDiV ++ rm /tmp/tmp.WIgLvKquTJ /tmp/tmp.GN1hHHMDiV ++ return 0 + local haproxy_pod_ip=10.133.138.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.138.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.138.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SxZQZ5DnCY +++ mktemp ++ local LAST_ERR=/tmp/tmp.A7wBtPt4e5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SxZQZ5DnCY ++ cat /tmp/tmp.A7wBtPt4e5 ++ rm /tmp/tmp.SxZQZ5DnCY /tmp/tmp.A7wBtPt4e5 ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep 
'^(pxc|proxysql)$' ++ echo pxc-client-778b75bfb4-4gd4b + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.j6dTYMCuSE/server_id_0.sql //tmp/tmp.j6dTYMCuSE/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.j6dTYMCuSE/server_id_1.sql //tmp/tmp.j6dTYMCuSE/server_id_2.sql + desc 'delete active writer and checking all haproxy pods still point to the same writer' + set +o xtrace ----------------------------------------------------------------------------------- delete active writer and checking all haproxy pods still point to the same writer ----------------------------------------------------------------------------------- + kubectl_bin delete pod haproxy-pxc-0 ++ mktemp + local LAST_OUT=/tmp/tmp.oQFM9NvN9L ++ mktemp + local LAST_ERR=/tmp/tmp.JIMqqIHhp2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod haproxy-pxc-0 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oQFM9NvN9L pod "haproxy-pxc-0" deleted + cat /tmp/tmp.JIMqqIHhp2 + rm /tmp/tmp.oQFM9NvN9L /tmp/tmp.JIMqqIHhp2 + return 0 + sleep 3 + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.BTQ8b28ihD ++ mktemp + local LAST_ERR=/tmp/tmp.RgjWh0GSOs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BTQ8b28ihD NAME READY STATUS RESTARTS AGE haproxy-haproxy-0 3/3 Running 0 7m30s haproxy-haproxy-1 3/3 Running 0 6m14s haproxy-haproxy-2 3/3 Running 0 5m54s haproxy-pxc-0 0/1 Init:0/1 0 5s haproxy-pxc-1 1/1 Running 0 6m17s haproxy-pxc-2 1/1 Running 0 5m5s pxc-client-778b75bfb4-4gd4b 1/1 Running 0 7m42s + cat /tmp/tmp.RgjWh0GSOs + rm /tmp/tmp.BTQ8b28ihD /tmp/tmp.RgjWh0GSOs + return 0 + check_haproxy_writer ++ seq 0 2 + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nLnK4PeEox +++ mktemp ++ local LAST_ERR=/tmp/tmp.AGV28NOfr4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nLnK4PeEox ++ cat /tmp/tmp.AGV28NOfr4 ++ rm /tmp/tmp.nLnK4PeEox /tmp/tmp.AGV28NOfr4 ++ return 0 + local haproxy_pod_ip=10.133.137.8 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.137.8 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.137.8 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SUqaE6lVBY +++ mktemp ++ local LAST_ERR=/tmp/tmp.lo9sYJDcEw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SUqaE6lVBY ++ cat /tmp/tmp.lo9sYJDcEw ++ rm /tmp/tmp.SUqaE6lVBY /tmp/tmp.lo9sYJDcEw ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-1 -o 
'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fUOK7aBg7s +++ mktemp ++ local LAST_ERR=/tmp/tmp.xnklIVufhX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fUOK7aBg7s ++ cat /tmp/tmp.xnklIVufhX ++ rm /tmp/tmp.fUOK7aBg7s /tmp/tmp.xnklIVufhX ++ return 0 + local haproxy_pod_ip=10.133.136.12 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.136.12 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.136.12 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U3wm7GOsY7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nqDT1VAlJd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U3wm7GOsY7 ++ cat /tmp/tmp.nqDT1VAlJd ++ rm /tmp/tmp.U3wm7GOsY7 /tmp/tmp.nqDT1VAlJd ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ echo pxc-client-778b75bfb4-4gd4b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace + for i in '$(seq 0 2)' ++ kubectl_bin get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FI5CcXgjOy +++ mktemp ++ local LAST_ERR=/tmp/tmp.C6FUZBagRf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FI5CcXgjOy ++ cat /tmp/tmp.C6FUZBagRf ++ rm /tmp/tmp.FI5CcXgjOy /tmp/tmp.C6FUZBagRf ++ return 0 + local haproxy_pod_ip=10.133.138.9 + run_mysql 'SHOW VARIABLES LIKE '\''server_id'\''' '-h 10.133.138.9 -uroot -proot_password' + local 'command=SHOW VARIABLES LIKE '\''server_id'\''' + local 'uri=-h 10.133.138.9 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ny31JSReGX +++ mktemp ++ local LAST_ERR=/tmp/tmp.KWssRpGgwa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ny31JSReGX ++ cat /tmp/tmp.KWssRpGgwa ++ rm /tmp/tmp.Ny31JSReGX /tmp/tmp.KWssRpGgwa ++ return 0 + client_pod=pxc-client-778b75bfb4-4gd4b + wait_pod pxc-client-778b75bfb4-4gd4b + local pod=pxc-client-778b75bfb4-4gd4b + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-778b75bfb4-4gd4b ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-778b75bfb4-4gd4b.Ok + set +o xtrace ++ seq 0 1 + for i in '$(seq 0 1)' + diff -u //tmp/tmp.j6dTYMCuSE/server_id_0.sql //tmp/tmp.j6dTYMCuSE/server_id_1.sql + for i in '$(seq 0 1)' + diff -u //tmp/tmp.j6dTYMCuSE/server_id_1.sql //tmp/tmp.j6dTYMCuSE/server_id_2.sql + desc 'check advanced options are enabled in haproxy statefulset' + set +o xtrace 
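Both writer checks, before and after haproxy-pxc-0 was deleted, follow the same recipe visible above: resolve each HAProxy pod's IP, ask that instance which PXC node it routes writes to by reading server_id, and diff the three answers pairwise, so the test fails if any two HAProxy pods disagree on the writer. A sketch assuming run_mysql prints the query result to stdout (the redirection is hidden by set +o xtrace; the scratch directory name is this run's):

check_haproxy_writer() {
    local tmp_dir=/tmp/tmp.j6dTYMCuSE       # this run's scratch directory
    local i
    for i in $(seq 0 2); do
        local haproxy_pod_ip
        haproxy_pod_ip=$(kubectl get pods "haproxy-haproxy-$i" \
            -o 'jsonpath={.status.podIP}')
        # each HAProxy forwards writes to one PXC node; server_id identifies it
        run_mysql "SHOW VARIABLES LIKE 'server_id'" \
            "-h $haproxy_pod_ip -uroot -proot_password" \
            >"$tmp_dir/server_id_$i.sql"
    done
    # pairwise diff: 0 vs 1, then 1 vs 2; any disagreement fails the test
    for i in $(seq 0 1); do
        diff -u "$tmp_dir/server_id_$i.sql" "$tmp_dir/server_id_$((i + 1)).sql"
    done
}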
----------------------------------------------------------------------------------- check advanced options are enabled in haproxy statefulset ----------------------------------------------------------------------------------- + compare_kubectl pdb/haproxy-haproxy + local resource=pdb/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml + local new_result=/tmp/tmp.j6dTYMCuSE/pdb_haproxy-haproxy.yml + desc 'compare pdb/haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare pdb/haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.24' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + kubectl_bin get -o yaml pdb/haproxy-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.jiQwhe7iR3 ++ mktemp + local LAST_ERR=/tmp/tmp.0yiafJY2P6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml pdb/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jiQwhe7iR3 + cat /tmp/tmp.0yiafJY2P6 + rm /tmp/tmp.jiQwhe7iR3 /tmp/tmp.0yiafJY2P6 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml /tmp/tmp.j6dTYMCuSE/pdb_haproxy-haproxy.yml + compare_kubectl statefulset/haproxy-haproxy + local resource=statefulset/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml + local new_result=/tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + desc 'compare statefulset/haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.24' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/haproxy-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.CzE0IlbLwr ++ mktemp + local LAST_ERR=/tmp/tmp.J7jSUQZ1gi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CzE0IlbLwr + cat /tmp/tmp.J7jSUQZ1gi + rm /tmp/tmp.CzE0IlbLwr /tmp/tmp.J7jSUQZ1gi + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml /tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + desc 'default haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- default haproxy-replicas service ----------------------------------------------------------------------------------- + test_default_replicas_service haproxy + kpatch_delete_field pxc haproxy /spec/haproxy/replicasServiceEnabled + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' ++ mktemp + local LAST_OUT=/tmp/tmp.1BsuDpFgqQ ++ mktemp + local LAST_ERR=/tmp/tmp.sqbCPTQZcc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc haproxy --type=json -p '[{"op": "remove", "path": "/spec/haproxy/replicasServiceEnabled"}]' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.1BsuDpFgqQ + cat /tmp/tmp.sqbCPTQZcc The request is invalid + rm /tmp/tmp.1BsuDpFgqQ /tmp/tmp.sqbCPTQZcc + return 1 + compare_kubectl service/haproxy-haproxy-replicas + local resource=service/haproxy-haproxy-replicas + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml + local new_result=/tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy-replicas.yml + desc 'compare service/haproxy-haproxy-replicas-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/haproxy-haproxy-replicas- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- 
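[annotation] The compare_kubectl steps traced above fetch the live object, normalize it with the long yq filter (dropping UIDs, timestamps, images, cluster IPs, config hashes, and rewriting the test namespace to a placeholder), and diff the result against a golden file. A condensed sketch of the pattern, with the filter abbreviated to fragments that appear verbatim in the trace and a hypothetical output path:

  kubectl get -o yaml statefulset/haproxy-haproxy \
    | yq eval 'del(.metadata.managedFields)
        | del(.. | select(has("creationTimestamp")).creationTimestamp)
        | del(.status)
        | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace")' - \
    > /tmp/statefulset_haproxy-haproxy.yml
  diff -u e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml /tmp/statefulset_haproxy-haproxy.yml

The version_gt checks reduce to a bc comparison of the detected Kubernetes version against the argument; a reconstruction (the KUBE_VERSION variable name is an assumption, its 1.22 value comes from the trace):

  version_gt() {  # returns 0 when the detected K8s version >= $1
    [ "$(echo "${KUBE_VERSION:-1.22} >= $1" | bc -l)" -eq 1 ]
  }
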
return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122-eks.yml ']' + kubectl_bin get -o yaml service/haproxy-haproxy-replicas ++ mktemp + local LAST_OUT=/tmp/tmp.eT0OLedyMb ++ mktemp + local LAST_ERR=/tmp/tmp.T2m5umQv4b + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/haproxy-haproxy-replicas + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. 
| select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eT0OLedyMb + cat /tmp/tmp.T2m5umQv4b + rm /tmp/tmp.eT0OLedyMb /tmp/tmp.T2m5umQv4b + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml /tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy-replicas.yml + desc 'disable haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- disable haproxy-replicas service ----------------------------------------------------------------------------------- + test_disable_replicas_service haproxy + kpatch_set_field pxc haproxy /spec/haproxy/replicasServiceEnabled false + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + local value=false + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.fgw26VTOVl ++ mktemp + local LAST_ERR=/tmp/tmp.G5WTZPXiqs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fgw26VTOVl + cat /tmp/tmp.G5WTZPXiqs + rm /tmp/tmp.fgw26VTOVl /tmp/tmp.G5WTZPXiqs + return 0 + sleep 1 + wait_for_delete svc/haproxy-haproxy-replicas + local res=svc/haproxy-haproxy-replicas + echo -n 'svc/haproxy-haproxy-replicas - ' svc/haproxy-haproxy-replicas - + set +o xtrace Error from server (NotFound): services "haproxy-haproxy-replicas" not found + grep -e 'not found$' + desc 'enable haproxy-replicas service' + set +o xtrace ----------------------------------------------------------------------------------- enable haproxy-replicas service ----------------------------------------------------------------------------------- + test_enable_replicas_service haproxy + kpatch_set_field pxc haproxy /spec/haproxy/replicasServiceEnabled true + local type=pxc + local name=haproxy + local path=/spec/haproxy/replicasServiceEnabled + local value=true + kubectl_bin patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.SRANgN3XGg ++ mktemp + local LAST_ERR=/tmp/tmp.HIOocN6973 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc haproxy --type=json -p '[{"op": "replace", "path": "/spec/haproxy/replicasServiceEnabled", "value": true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SRANgN3XGg + cat /tmp/tmp.HIOocN6973 + rm /tmp/tmp.SRANgN3XGg /tmp/tmp.HIOocN6973 + return 0 + sleep 1 + 
compare_kubectl service/haproxy-haproxy-replicas + local resource=service/haproxy-haproxy-replicas + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas.yml + local new_result=/tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy-replicas.yml + desc 'compare service/haproxy-haproxy-replicas-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/haproxy-haproxy-replicas- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.24' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml service/haproxy-haproxy-replicas ++ mktemp + local LAST_OUT=/tmp/tmp.mGzo8mMGBX ++ mktemp + local LAST_ERR=/tmp/tmp.O5lKtOPJei + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/haproxy-haproxy-replicas + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mGzo8mMGBX + cat /tmp/tmp.O5lKtOPJei + rm /tmp/tmp.mGzo8mMGBX /tmp/tmp.O5lKtOPJei + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-replicas-k122.yml /tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy-replicas.yml + desc 'enable proxy-sql' + set +o xtrace ----------------------------------------------------------------------------------- enable proxy-sql ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy-proxysql.yml + '[' -z '' ']' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.B15htVbzHQ + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy-proxysql.yml + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy-proxysql.yml + /usr/bin/sed -e 
's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.VHagA3vhAY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.B15htVbzHQ perconaxtradbcluster.pxc.percona.com/haproxy configured + cat /tmp/tmp.VHagA3vhAY + rm /tmp/tmp.B15htVbzHQ /tmp/tmp.VHagA3vhAY + return 0 + wait_for_running haproxy-proxysql 2 + local name=haproxy-proxysql + let last_pod=1 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 1 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-proxysql-0 480 + local pod=haproxy-proxysql-0 + local max_retry=480 + local ns= ++ echo haproxy-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace haproxy-proxysql-0......Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-proxysql-1 480 + local pod=haproxy-proxysql-1 + local max_retry=480 + local ns= ++ echo haproxy-proxysql-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace haproxy-proxysql-1..Ok + compare_kubectl statefulset/haproxy-proxysql + local resource=statefulset/haproxy-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql.yml + local new_result=/tmp/tmp.j6dTYMCuSE/statefulset_haproxy-proxysql.yml + desc 'compare statefulset/haproxy-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/haproxy-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
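[annotation] apply_config/cat_config, traced above, pins every image reference in the test manifest to the build under test with a chain of sed substitutions before piping the result to kubectl apply. A condensed sketch using only substitutions visible in the trace:

  cat e2e-tests/haproxy/conf/haproxy-proxysql.yml \
    | sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
    | sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
    | sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
    | sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' \
    | sed -e 's~minio-service.#namespace~minio-service.haproxy-21703~' \
    | kubectl apply -f -
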
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. 
| select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/haproxy-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.iI5tZG1iEi ++ mktemp + local LAST_ERR=/tmp/tmp.DdAmZQ8ycT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/haproxy-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iI5tZG1iEi + cat /tmp/tmp.DdAmZQ8ycT + rm /tmp/tmp.iI5tZG1iEi /tmp/tmp.DdAmZQ8ycT + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql.yml /tmp/tmp.j6dTYMCuSE/statefulset_haproxy-proxysql.yml + compare_kubectl service/haproxy-proxysql + local resource=service/haproxy-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql.yml + local new_result=/tmp/tmp.j6dTYMCuSE/service_haproxy-proxysql.yml + desc 'compare service/haproxy-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/haproxy-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.24' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-k122-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-k122-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml service/haproxy-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.rnCIWKqJmC ++ mktemp + local LAST_ERR=/tmp/tmp.xxe2AYBACT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/haproxy-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rnCIWKqJmC + cat /tmp/tmp.xxe2AYBACT + rm /tmp/tmp.rnCIWKqJmC /tmp/tmp.xxe2AYBACT + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-proxysql-k122.yml /tmp/tmp.j6dTYMCuSE/service_haproxy-proxysql.yml + compare_mysql_cmd_local ping_timeout_server 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 '' proxysql + local command_id=ping_timeout_server + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local postfix= + local container_name=proxysql + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/ping_timeout_server.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 proxysql + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name=proxysql + set +o xtrace + '[' '!' 
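[annotation] The compare_mysql_cmd_local arguments above carry two levels of shell quote-escaping for the nested shells; unwrapped, the probe is a plain query against the ProxySQL admin interface on port 6032. A rough equivalent (the kubectl exec / mysql invocation is an assumption; the query, credentials, pod, and container all come from the trace):

  kubectl exec haproxy-proxysql-0 -c proxysql -- \
    mysql -h127.0.0.1 -P6032 -uproxyadmin -padmin_password \
    -e "SELECT * FROM global_variables WHERE variable_name = 'mysql-ping_timeout_server';"

The captured output is then diffed against compare/ping_timeout_server.sql.
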
-s /tmp/tmp.j6dTYMCuSE/ping_timeout_server.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/ping_timeout_server.sql /tmp/tmp.j6dTYMCuSE/ping_timeout_server.sql + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-proxysql.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-proxysql.yaml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-proxysql.yaml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + local LAST_OUT=/tmp/tmp.0wdiTWTKMf + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.4jvHHms8Vo + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0wdiTWTKMf secret/haproxy-proxysql created + cat /tmp/tmp.4jvHHms8Vo + rm /tmp/tmp.0wdiTWTKMf /tmp/tmp.4jvHHms8Vo + return 0 + wait_cluster_consistency haproxy 3 2 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aAjd4wKaff +++ mktemp ++ local LAST_ERR=/tmp/tmp.FNKXzgk0Iw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aAjd4wKaff ++ cat /tmp/tmp.FNKXzgk0Iw ++ rm /tmp/tmp.aAjd4wKaff /tmp/tmp.FNKXzgk0Iw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jhbYek0Fjq +++ mktemp ++ local LAST_ERR=/tmp/tmp.S2Pxuh95Kf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jhbYek0Fjq ++ cat /tmp/tmp.S2Pxuh95Kf ++ rm /tmp/tmp.jhbYek0Fjq /tmp/tmp.S2Pxuh95Kf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ 
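[annotation] wait_cluster_consistency, traced above, polls the CR status until the operator reports the cluster ready, then (continuing below) verifies the ready replica counts for pxc and the active proxy. A reconstruction of the loop; the retry/timeout bound is not visible in this excerpt:

  sleep 7
  until [ "$(kubectl get pxc haproxy -o 'jsonpath={.status.state}')" = "ready" ]; do
    echo 'waiting for cluster readyness'   # sic: spelling as emitted by the harness
    sleep 20
  done
  [ "$(kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}')" = "3" ]
  [ "$(kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}')" = "2" ]
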
kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dKtvGpmzx9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ra2RRgc6IL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dKtvGpmzx9 ++ cat /tmp/tmp.ra2RRgc6IL ++ rm /tmp/tmp.dKtvGpmzx9 /tmp/tmp.ra2RRgc6IL ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FW4I0It3ye +++ mktemp ++ local LAST_ERR=/tmp/tmp.J3M9a7JqBp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FW4I0It3ye ++ cat /tmp/tmp.J3M9a7JqBp ++ rm /tmp/tmp.FW4I0It3ye /tmp/tmp.J3M9a7JqBp ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.brUBMuN8ie ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.wXZSrFf92u +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.brUBMuN8ie +++++ cat /tmp/tmp.wXZSrFf92u +++++ rm /tmp/tmp.brUBMuN8ie /tmp/tmp.wXZSrFf92u +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.00ae6XPqBX ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.5EXWFA6W6u +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.00ae6XPqBX +++++ cat /tmp/tmp.5EXWFA6W6u +++++ rm /tmp/tmp.00ae6XPqBX /tmp/tmp.5EXWFA6W6u +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-proxysql ++++ return +++ local cluster_proxy=haproxy-proxysql +++ echo proxysql ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b2vjxWWCcb +++ mktemp ++ local LAST_ERR=/tmp/tmp.z70QexD2fg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.b2vjxWWCcb ++ cat /tmp/tmp.z70QexD2fg ++ rm /tmp/tmp.b2vjxWWCcb /tmp/tmp.z70QexD2fg ++ return 0 + [[ 2 == \2 ]] + compare_kubectl statefulset/haproxy-proxysql -secret + local resource=statefulset/haproxy-proxysql + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret.yml + local new_result=/tmp/tmp.j6dTYMCuSE/statefulset_haproxy-proxysql.yml + desc 'compare statefulset/haproxy-proxysql--secret' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/haproxy-proxysql--secret ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-eks.yml ']' + [[ 
perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret-eks.yml ']' + kubectl_bin get -o yaml statefulset/haproxy-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.KtzbQxooKI ++ mktemp + local LAST_ERR=/tmp/tmp.raD0tMK4Tq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/haproxy-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.KtzbQxooKI + cat /tmp/tmp.raD0tMK4Tq + rm /tmp/tmp.KtzbQxooKI /tmp/tmp.raD0tMK4Tq + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-proxysql-secret.yml /tmp/tmp.j6dTYMCuSE/statefulset_haproxy-proxysql.yml + compare_mysql_cmd_local ping_timeout_server 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 -secret proxysql + local command_id=ping_timeout_server + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local postfix=-secret + local container_name=proxysql + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/ping_timeout_server-secret.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + run_mysql_local 'SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' haproxy-proxysql-0 proxysql + local 'command=SELECT * FROM global_variables WHERE variable_name = '\''\'\'''\''mysql-ping_timeout_server'\''\'\'''\'';' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=haproxy-proxysql-0 + local container_name=proxysql + set +o xtrace + '[' '!' 
-s /tmp/tmp.j6dTYMCuSE/ping_timeout_server.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/ping_timeout_server-secret.sql /tmp/tmp.j6dTYMCuSE/ping_timeout_server.sql + wait_cluster_consistency haproxy 3 2 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wsvx50OvFA +++ mktemp ++ local LAST_ERR=/tmp/tmp.frvJB28cn6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wsvx50OvFA ++ cat /tmp/tmp.frvJB28cn6 ++ rm /tmp/tmp.wsvx50OvFA /tmp/tmp.frvJB28cn6 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tSCYZc62Vc +++ mktemp ++ local LAST_ERR=/tmp/tmp.5UIuC1uaB9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tSCYZc62Vc ++ cat /tmp/tmp.5UIuC1uaB9 ++ rm /tmp/tmp.tSCYZc62Vc /tmp/tmp.5UIuC1uaB9 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.QJ3FW7nnkc ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Rok4OFUoO9 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.QJ3FW7nnkc +++++ cat /tmp/tmp.Rok4OFUoO9 +++++ rm /tmp/tmp.QJ3FW7nnkc /tmp/tmp.Rok4OFUoO9 +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.8TnWgmR7s6 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.6aGTwZMLEE +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.8TnWgmR7s6 +++++ cat /tmp/tmp.6aGTwZMLEE +++++ rm /tmp/tmp.8TnWgmR7s6 /tmp/tmp.6aGTwZMLEE +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-proxysql ++++ return +++ local cluster_proxy=haproxy-proxysql +++ echo proxysql ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XPzzHSDYR0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NH8isyq6r9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XPzzHSDYR0 ++ cat /tmp/tmp.NH8isyq6r9 ++ rm /tmp/tmp.XPzzHSDYR0 /tmp/tmp.NH8isyq6r9 ++ return 0 + [[ 2 == \2 ]] + desc 're-enable haproxy' + set +o xtrace ----------------------------------------------------------------------------------- re-enable haproxy 
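[annotation] The get_proxy_engine/get_proxy calls traced above pick the active proxy by reading the CR spec: haproxy if enabled, otherwise proxysql. A sketch consistent with the jsonpath reads in the trace (the function body itself is a reconstruction):

  get_proxy() {
    local cluster="$1"
    if [ "$(kubectl get pxc "$cluster" -o 'jsonpath={.spec.haproxy.enabled}')" = "true" ]; then
      echo "${cluster}-haproxy"
    elif [ "$(kubectl get pxc "$cluster" -o 'jsonpath={.spec.proxysql.enabled}')" = "true" ]; then
      echo "${cluster}-proxysql"
    fi
  }

Here haproxy.enabled is false and proxysql.enabled is true, so the consistency check targets .status.proxysql.ready.
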
----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/haproxy.yml + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_OUT=/tmp/tmp.dvhq9iZzSs + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' ++ mktemp + local LAST_ERR=/tmp/tmp.ohAm6eqCGw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dvhq9iZzSs perconaxtradbcluster.pxc.percona.com/haproxy configured + cat /tmp/tmp.ohAm6eqCGw + rm /tmp/tmp.dvhq9iZzSs /tmp/tmp.ohAm6eqCGw + return 0 + wait_for_running haproxy-haproxy 3 + local name=haproxy-haproxy + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-0 480 + local pod=haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-0....error: a container name must be specified for pod haproxy-haproxy-0, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-1 480 + local pod=haproxy-haproxy-1 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-1...error: a container name must be specified for pod haproxy-haproxy-1, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + for i in '$(seq 0 $last_pod)' + wait_pod haproxy-haproxy-2 480 + local pod=haproxy-haproxy-2 + local max_retry=480 + local ns= ++ echo haproxy-haproxy-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace haproxy-haproxy-2error: a container name must be specified for pod haproxy-haproxy-2, choose one of: [haproxy pxc-monit my-sidecar-1] .Ok + compare_kubectl statefulset/haproxy-haproxy + local 
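[annotation] wait_pod, traced above, derives the container to probe from the pod name. For pxc and proxysql pods the pattern matches; for haproxy pods it matches nothing, the container variable stays empty, and kubectl prints "a container name must be specified" on each probe until the check passes anyway (the trailing "Ok"). The detection as traced:

  # empty for haproxy pods: the name carries neither -pxc-N nor -proxysql-N
  local container=$(echo "$pod" | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | egrep '^(pxc|proxysql)$')
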
resource=statefulset/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml + local new_result=/tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + desc 'compare statefulset/haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.24' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/haproxy-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.AAIL7mszV0 ++ mktemp + local LAST_ERR=/tmp/tmp.b81CBVZks1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AAIL7mszV0 + cat /tmp/tmp.b81CBVZks1 + rm /tmp/tmp.AAIL7mszV0 /tmp/tmp.b81CBVZks1 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy.yml /tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + compare_kubectl service/haproxy-haproxy + local resource=service/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy.yml + local new_result=/tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy.yml + desc 'compare service/haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare service/haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-k122.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-k122.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-k122-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-k122-eks.yml ']' + kubectl_bin get -o yaml service/haproxy-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | ++ mktemp del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.qlniqwI35B ++ mktemp + local LAST_ERR=/tmp/tmp.Kj9uTG3JSe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qlniqwI35B + cat /tmp/tmp.Kj9uTG3JSe + rm /tmp/tmp.qlniqwI35B /tmp/tmp.Kj9uTG3JSe + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/service_haproxy-haproxy-k122.yml /tmp/tmp.j6dTYMCuSE/service_haproxy-haproxy.yml + compare_kubectl pdb/haproxy-haproxy + local resource=pdb/haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml + local new_result=/tmp/tmp.j6dTYMCuSE/pdb_haproxy-haproxy.yml + desc 'compare pdb/haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare pdb/haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy-eks.yml ']' + kubectl_bin get -o yaml pdb/haproxy-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.US7yuSZYlT ++ mktemp + local LAST_ERR=/tmp/tmp.FBGnv50NiF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml pdb/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.US7yuSZYlT + cat /tmp/tmp.FBGnv50NiF + rm /tmp/tmp.US7yuSZYlT /tmp/tmp.FBGnv50NiF + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/pdb_haproxy-haproxy.yml /tmp/tmp.j6dTYMCuSE/pdb_haproxy-haproxy.yml + kubectl_bin exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + grep Maxconn: ++ mktemp + local LAST_OUT=/tmp/tmp.D86Sg9QUZE ++ mktemp + local LAST_ERR=/tmp/tmp.9tccrjuN7T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D86Sg9QUZE + cat /tmp/tmp.9tccrjuN7T Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.D86Sg9QUZE /tmp/tmp.9tccrjuN7T + return 0 + diff --strip-trailing-cr /tmp/tmp.j6dTYMCuSE/haproxy_maxconn.txt /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/haproxy_maxconn.txt + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-haproxy.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-haproxy.yaml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/conf/config-secret-haproxy.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1131-d64e70d4#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.haproxy-21703~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' ++ mktemp + local LAST_OUT=/tmp/tmp.UagUmp17V2 ++ mktemp + local LAST_ERR=/tmp/tmp.R4jnjnZbmB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UagUmp17V2 secret/haproxy-haproxy created + cat /tmp/tmp.R4jnjnZbmB + rm /tmp/tmp.UagUmp17V2 /tmp/tmp.R4jnjnZbmB + return 0 + wait_cluster_consistency haproxy 3 3 + local cluster_name=haproxy + local cluster_size=3 + local proxy_size=3 + '[' -z 3 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency 
----------------------------------------------------------------------------------- + sleep 7 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.feQDGODY4D +++ mktemp ++ local LAST_ERR=/tmp/tmp.2t6yONOrLW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.feQDGODY4D ++ cat /tmp/tmp.2t6yONOrLW ++ rm /tmp/tmp.feQDGODY4D /tmp/tmp.2t6yONOrLW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bMcmHAJzXY +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lz5rfENpct ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bMcmHAJzXY ++ cat /tmp/tmp.Lz5rfENpct ++ rm /tmp/tmp.bMcmHAJzXY /tmp/tmp.Lz5rfENpct ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hdEB8i7Hig +++ mktemp ++ local LAST_ERR=/tmp/tmp.9v64MqRxKE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hdEB8i7Hig ++ cat /tmp/tmp.9v64MqRxKE ++ rm /tmp/tmp.hdEB8i7Hig /tmp/tmp.9v64MqRxKE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9RCaT3Xei2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j8YReJCQvY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9RCaT3Xei2 ++ cat /tmp/tmp.j8YReJCQvY ++ rm /tmp/tmp.9RCaT3Xei2 /tmp/tmp.j8YReJCQvY ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3zCxzhNbST +++ mktemp ++ local LAST_ERR=/tmp/tmp.HPqqsDoAfd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3zCxzhNbST ++ cat /tmp/tmp.HPqqsDoAfd ++ rm /tmp/tmp.3zCxzhNbST /tmp/tmp.HPqqsDoAfd ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine haproxy +++ local cluster_name=haproxy ++++ get_proxy haproxy ++++ local target_cluster=haproxy +++++ kubectl_bin get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.OZ7SWelrFQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.3RiqtlZsTF +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.OZ7SWelrFQ +++++ cat /tmp/tmp.3RiqtlZsTF +++++ rm /tmp/tmp.OZ7SWelrFQ /tmp/tmp.3RiqtlZsTF +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo haproxy-haproxy ++++ return +++ local cluster_proxy=haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc 
haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RxPERAur3y +++ mktemp ++ local LAST_ERR=/tmp/tmp.hwx7POgXWV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RxPERAur3y ++ cat /tmp/tmp.hwx7POgXWV ++ rm /tmp/tmp.RxPERAur3y /tmp/tmp.hwx7POgXWV ++ return 0 + [[ 3 == \3 ]] + compare_kubectl statefulset/haproxy-haproxy -secret + local resource=statefulset/haproxy-haproxy + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret.yml + local new_result=/tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + desc 'compare statefulset/haproxy-haproxy--secret' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/haproxy-haproxy--secret ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.24' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.22 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.22 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("haproxy-21703", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/haproxy-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.th5U44yVO3 ++ mktemp + local LAST_ERR=/tmp/tmp.7vCmaxL2b8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.th5U44yVO3 + cat /tmp/tmp.7vCmaxL2b8 + rm /tmp/tmp.th5U44yVO3 /tmp/tmp.7vCmaxL2b8 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/statefulset_haproxy-haproxy-secret.yml /tmp/tmp.j6dTYMCuSE/statefulset_haproxy-haproxy.yml + grep Maxconn: + kubectl_bin exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' ++ mktemp + local LAST_OUT=/tmp/tmp.grs6A2ORqv ++ mktemp + local LAST_ERR=/tmp/tmp.r23gnqW7rL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec haproxy-haproxy-0 -c haproxy -it -- bash -c 'echo "show info" | socat stdio unix-connect:/etc/haproxy/pxc/haproxy.sock' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.grs6A2ORqv + cat /tmp/tmp.r23gnqW7rL Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.grs6A2ORqv /tmp/tmp.r23gnqW7rL + return 0 + diff --strip-trailing-cr /tmp/tmp.j6dTYMCuSE/haproxy_maxconn.txt /mnt/jenkins/workspace/cloud-pxc-operator_PR-1131/e2e-tests/haproxy/compare/haproxy_maxconn-secret.txt + desc 'clean up' + set +o xtrace ----------------------------------------------------------------------------------- clean up ----------------------------------------------------------------------------------- + destroy haproxy-21703 + local namespace=haproxy-21703 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + sort -u + grep -v 'the object has been modified' + tee /tmp/tmp.j6dTYMCuSE/operator.log + grep -v level=info + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.DmfNj6nHmw +++ mktemp ++ local LAST_ERR=/tmp/tmp.FguelETXem ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DmfNj6nHmw ++ cat /tmp/tmp.FguelETXem ++ rm /tmp/tmp.DmfNj6nHmw /tmp/tmp.FguelETXem ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-58bddf54b8-pz69b ++ mktemp + local LAST_OUT=/tmp/tmp.7HS1O8PZ1g ++ mktemp + local LAST_ERR=/tmp/tmp.pAqM4h7G0Y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-58bddf54b8-pz69b + 
exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7HS1O8PZ1g + cat /tmp/tmp.pAqM4h7G0Y + rm /tmp/tmp.7HS1O8PZ1g /tmp/tmp.pAqM4h7G0Y + return 0
2023-04-20T10:45:09.701Z INFO setup Manager starting up {"gitCommit": "d64e70d44804dd626500b1fbc99331b93a5624b1", "gitBranch": "PR-1131-d64e70d4", "goVersion": "go1.19.8", "os": "linux", "arch": "amd64"}
2023-04-20T10:45:09.701Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.22.17-gke.7500"}
2023-04-20T10:45:10.359Z INFO controller-runtime.metrics Metrics server is starting to listen {"addr": ":8080"}
2023-04-20T10:45:10.359Z INFO setup Registering Components.
2023-04-20T10:45:12.499Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com...
2023-04-20T10:45:12.499Z INFO controller-runtime.certwatcher Updated current TLS certificate
2023-04-20T10:45:12.499Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"}
2023-04-20T10:45:12.499Z INFO controller-runtime.webhook.webhooks Starting webhook server
2023-04-20T10:45:12.499Z INFO setup Starting the Cmd.
2023-04-20T10:45:12.499Z INFO Starting server {"kind": "health probe", "addr": "[::]:8081"}
2023-04-20T10:45:12.499Z INFO Starting server {"path": "/metrics", "kind": "metrics", "addr": "[::]:8080"}
2023-04-20T10:45:12.500Z INFO controller-runtime.certwatcher Starting certificate watcher
2023-04-20T10:45:12.500Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443}
2023-04-20T10:45:12.510Z INFO Starting Controller {"controller": "perconaxtradbclusterbackup-controller"}
2023-04-20T10:45:12.510Z INFO Starting Controller {"controller": "perconaxtradbcluster-controller"}
2023-04-20T10:45:12.510Z INFO Starting Controller {"controller": "perconaxtradbclusterrestore-controller"}
2023-04-20T10:45:12.510Z INFO Starting EventSource {"controller": "perconaxtradbclusterbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"}
2023-04-20T10:45:12.510Z INFO Starting EventSource {"controller": "perconaxtradbcluster-controller", "source": "kind source: *v1.PerconaXtraDBCluster"}
2023-04-20T10:45:12.510Z INFO Starting EventSource {"controller": "perconaxtradbclusterrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"}
2023-04-20T10:45:12.510Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com
2023-04-20T10:45:12.786Z INFO Starting workers {"controller": "perconaxtradbclusterbackup-controller", "worker count": 1}
2023-04-20T10:45:12.786Z INFO Starting workers {"controller": "perconaxtradbcluster-controller", "worker count": 20}
2023-04-20T10:45:12.786Z INFO Starting workers {"controller": "perconaxtradbclusterrestore-controller", "worker count": 1}
{"level":"error","logger":"perconaxtradbcluster","caller":"pxc/controller.go:1245","msg":"sync users","cluster":"haproxy","namespace":"haproxy-21703","error":"exec syncusers: command terminated with exit code 1 / / ERROR (line:387) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n","errorVerbose":"exec syncusers: command terminated with exit code 1 / / ERROR (line:387) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:969\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1243\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1594","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1245"}
{"level":"error","logger":"perconaxtradbcluster","caller":"pxc/controller.go:1245","msg":"sync users","cluster":"haproxy","namespace":"haproxy-21703","error":"exec syncusers: Internal error occurred: error executing command in container: failed to exec in container: failed to create exec \"454e4bdcf38162088e2c7bf44139dc9be925ae36d8773e373c3c08811c62e16f\": task a2c7a96dfe058be08d83b2970ba50ae7361ec6cf22133a00dcc440c420dee899 not found: not found / / ","errorVerbose":"exec syncusers: Internal error occurred: error executing command in container: failed to exec in container: failed to create exec \"454e4bdcf38162088e2c7bf44139dc9be925ae36d8773e373c3c08811c62e16f\": task a2c7a96dfe058be08d83b2970ba50ae7361ec6cf22133a00dcc440c420dee899 not found: not found / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:969\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1243\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1594","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1245"}
{"level":"info","logger":"perconaxtradbcluster","caller":"pxc/controller.go:452","msg":"reconcile replication error","cluster":"haproxy","namespace":"haproxy-21703","err":"get primary pxc pod: failed to get proxy connection: dial tcp 10.133.156.1:3306: connect: connection refused"}
{"level":"info","logger":"perconaxtradbcluster","caller":"pxc/controller.go:452","msg":"reconcile replication error","cluster":"haproxy","namespace":"haproxy-21703","err":"get primary pxc pod: failed to get proxy connection: dial tcp: lookup haproxy-proxysql-unready.haproxy-21703 on 10.133.144.10:53: no such host"}
{"level":"info","logger":"perconaxtradbcluster","caller":"pxc/controller.go:452","msg":"reconcile replication error","cluster":"haproxy","namespace":"haproxy-21703","err":"get primary pxc pod: not found"}
{"level":"info","logger":"perconaxtradbcluster","caller":"pxc/users.go:532","msg":"User monitor: granted privileges","cluster":"haproxy","namespace":"haproxy-21703"}
{"level":"info","logger":"perconaxtradbcluster","caller":"pxc/users.go:729","msg":"User xtrabackup: granted privileges","cluster":"haproxy","namespace":"haproxy-21703"}
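The two "sync users" errors above are transient: the periodic user resync raced the switch from ProxySQL to HAProxy and a restarting container, and the errors stop once the HAProxy pods are serving. The excerpt itself is produced by the log-collection pipeline traced just before it (kubectl logs piped through grep, sed, sort and tee); a commented reconstruction, in one plausible ordering of the piped commands since xtrace interleaves them, with the pod name and tmp path taken from this run, is:

  # Dump the operator pod's log, drop routine noise, strip the JSON "ts"
  # field, de-duplicate, and keep a copy next to the other test artifacts.
  kubectl logs -n pxc-operator percona-xtradb-cluster-operator-58bddf54b8-pz69b |
    grep -v 'the object has been modified' |
    grep -v 'get backup status: Job.batch' |
    grep -v level=info |
    /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' |
    sort -u |
    tee /tmp/tmp.j6dTYMCuSE/operator.log

Note that deleting "ts":<number> this way leaves a dangling double comma in each JSON entry; the entries quoted above and below have been tidied to stay valid JSON.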
{"level":"info",,"logger":"perconaxtradbcluster","caller":"pxc/version.go:343","msg":"update PXC version (fetched from db)","cluster":"haproxy","namespace":"haproxy-21703","new version":"5.7.41-44-57"} {"level":"info",,"logger":"perconaxtradbcluster","caller":"pxc/version.go:390","msg":"Set CR version","cluster":"haproxy","namespace":"haproxy-21703","version":"1.13.0"} {"level":"info",,"logger":"perconaxtradbcluster","caller":"v1/pxc_types.go:1139","msg":"Wrong sidecar container name, it is skipped","cluster":"haproxy","namespace":"haproxy-21703","containerName":"haproxy"} {"level":"info",,"logger":"perconaxtradbcluster","caller":"v1/pxc_types.go:993","msg":"Setting safe defaults, updating ProxySQL size","cluster":"haproxy","namespace":"haproxy-21703","oldSize":1,"newSize":2} + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n haproxy-21703 haproxy --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/haproxy patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.27myr5etvK ++ mktemp + local LAST_ERR=/tmp/tmp.SgNChEoETf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.27myr5etvK perconaxtradbcluster.pxc.percona.com "haproxy" deleted + cat /tmp/tmp.SgNChEoETf + rm /tmp/tmp.27myr5etvK /tmp/tmp.SgNChEoETf + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.VL3aW94Mnr ++ mktemp + local LAST_ERR=/tmp/tmp.9InJ6nnFzY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VL3aW94Mnr No resources found + cat /tmp/tmp.9InJ6nnFzY + rm /tmp/tmp.VL3aW94Mnr /tmp/tmp.9InJ6nnFzY + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.IzKPO4o4qj ++ mktemp + local LAST_ERR=/tmp/tmp.UqpEQuZM3b + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IzKPO4o4qj No resources found + cat /tmp/tmp.UqpEQuZM3b + rm /tmp/tmp.IzKPO4o4qj /tmp/tmp.UqpEQuZM3b + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.FaNeEAz56E ++ mktemp + local LAST_ERR=/tmp/tmp.jPZe39sQ3W + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FaNeEAz56E validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.jPZe39sQ3W + rm /tmp/tmp.FaNeEAz56E /tmp/tmp.jPZe39sQ3W + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.8.0/cert-manager.yaml + : + '[' '!' 
-z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.j6dTYMCuSE + kubectl_bin delete --grace-period=0 --force=true namespace haproxy-21703 + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ccZnXHwsnk + local LAST_OUT=/tmp/tmp.WTYhnWU0ke + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.llJigarH79 + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.yQwKIyi3mV + local exit_status=0 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace haproxy-21703 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator
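Throughout this log, every kubectl invocation runs through the harness's kubectl_bin helper, whose definition is not part of the log itself. A minimal sketch of what the trace implies (three attempts via seq 0 2, stdout and stderr captured to mktemp files, break on the first attempt that exits 0) is below; the retry delay is an assumption not visible in the trace, and the real helper in the e2e-tests suite may differ:

  # Hypothetical reconstruction of the retry wrapper implied by the trace.
  kubectl_bin() {
    local LAST_OUT
    LAST_OUT=$(mktemp)
    local LAST_ERR
    LAST_ERR=$(mktemp)
    local exit_status=0
    local i
    for i in $(seq 0 2); do
      set +e
      kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
      exit_status=$?
      set -e
      if [ "$exit_status" != 0 ]; then
        sleep 1    # assumed back-off between attempts; not shown in the trace
        continue
      fi
      break
    done
    # The trace prints both captured streams inline before cleaning up.
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
  }

It is invoked exactly like kubectl itself, e.g. kubectl_bin get pxc haproxy -o 'jsonpath={.status.state}' in the readiness polls above, which is why each poll leaves the LAST_OUT/LAST_ERR temp-file pattern in the trace.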