++ echo 'Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/scaling-proxysql.log' Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/scaling-proxysql.log ++ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: No Auth Provider found for name "gcp" +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' +++ kubectl version -o json ++ '[' ']' ++ EKS=0 +++ /usr/bin/sed -r 's/[^0-9.]+//g' +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ kubectl version -o json ++ KUBE_VERSION=1.20 +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' +++ helm version -c ++ HELM_VERSION=v3.8.1 ++ '[' v3 == v2 ']' + create_infra scaling-proxysql-12663 + local ns=scaling-proxysql-12663 + '[' -n pxc-operator ']' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name, label selector, or --all flag specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.nLxFz32LUH ++ mktemp + local LAST_ERR=/tmp/tmp.2TDBG0cGXw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.nLxFz32LUH No resources found + cat /tmp/tmp.2TDBG0cGXw + rm /tmp/tmp.nLxFz32LUH /tmp/tmp.2TDBG0cGXw + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.ocqm95xpmD ++ mktemp + local LAST_ERR=/tmp/tmp.EHhX6B5Hxd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ocqm95xpmD No resources found + cat /tmp/tmp.EHhX6B5Hxd + rm /tmp/tmp.ocqm95xpmD /tmp/tmp.EHhX6B5Hxd + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.4ohbnrNSux ++ mktemp + local LAST_ERR=/tmp/tmp.gA4HsRV72C + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.4ohbnrNSux No resources found + cat /tmp/tmp.gA4HsRV72C + rm /tmp/tmp.4ohbnrNSux /tmp/tmp.gA4HsRV72C + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + '[' '!' 
-z '' ']' + kubectl_bin delete namespace pxc-operator + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.wYLiA3YHO8 ++ mktemp + xargs kubectl delete ns + awk '{print$1}' + local LAST_OUT=/tmp/tmp.JYTeQsjjCB ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.OHZAvyThJe + local exit_status=0 + local LAST_ERR=/tmp/tmp.r5gQVQhrFG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.wYLiA3YHO8 + cat /tmp/tmp.r5gQVQhrFG + rm /tmp/tmp.wYLiA3YHO8 /tmp/tmp.r5gQVQhrFG + return 0 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.JYTeQsjjCB namespace "pxc-operator" deleted + cat /tmp/tmp.OHZAvyThJe + rm /tmp/tmp.JYTeQsjjCB /tmp/tmp.OHZAvyThJe + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + set +o xtrace namespace/pxc-operator - Error from server (NotFound): namespaces "pxc-operator" not found + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tgYV0Nyi9Q ++ mktemp + local LAST_ERR=/tmp/tmp.L5rDQjPs7A + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.tgYV0Nyi9Q namespace/pxc-operator created + cat /tmp/tmp.L5rDQjPs7A + rm /tmp/tmp.tgYV0Nyi9Q /tmp/tmp.L5rDQjPs7A + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.vchDRne6GT +++ mktemp ++ local LAST_ERR=/tmp/tmp.UEmLz11TAQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.vchDRne6GT ++ cat /tmp/tmp.UEmLz11TAQ ++ rm /tmp/tmp.vchDRne6GT /tmp/tmp.UEmLz11TAQ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mqVZRwyy60 ++ mktemp + local LAST_ERR=/tmp/tmp.JaTq4ZnxC8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.mqVZRwyy60 Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling" modified. 
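# Note: the repeated mktemp / LAST_OUT / LAST_ERR / "seq 0 2" fragments in this
# log come from a retry wrapper around kubectl used by the test suite. A minimal
# sketch of that pattern, reconstructed from the trace alone (the function body,
# retry count and back-off are assumptions, not the real helper from
# e2e-tests/functions):
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                      # up to three attempts
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        if [[ $exit_status != 0 ]]; then
            sleep "$i"                           # brief pause before retrying
        else
            break                                # success: stop retrying
        fi
    done
    cat "$LAST_OUT"                              # replay captured stdout
    cat "$LAST_ERR" >&2                          # replay captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
# On persistent failure the wrapper still returns the last exit status, which
# callers sometimes ignore (the "+ :" no-ops seen later in this log).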
+ cat /tmp/tmp.JaTq4ZnxC8 + rm /tmp/tmp.mqVZRwyy60 /tmp/tmp.JaTq4ZnxC8 + return 0 + deploy_operator + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.p3KywdRUbt ++ mktemp + local LAST_ERR=/tmp/tmp.he9EstY1t0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.p3KywdRUbt customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbbackups.pxc.percona.com configured + cat /tmp/tmp.he9EstY1t0 + rm /tmp/tmp.p3KywdRUbt /tmp/tmp.he9EstY1t0 + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JE7QkBkgIi ++ mktemp + local LAST_ERR=/tmp/tmp.LDL2AzP1Se + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.JE7QkBkgIi clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.LDL2AzP1Se + rm /tmp/tmp.JE7QkBkgIi /tmp/tmp.LDL2AzP1Se + return 0 + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Hl9CumaTXt ++ mktemp + local LAST_ERR=/tmp/tmp.GzoAxFNAu9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a^' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.Hl9CumaTXt deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.GzoAxFNAu9 + rm /tmp/tmp.Hl9CumaTXt /tmp/tmp.GzoAxFNAu9 + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.43u8FRvUnd +++ mktemp ++ local LAST_ERR=/tmp/tmp.C2N7rhoQ76 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.43u8FRvUnd ++ cat /tmp/tmp.C2N7rhoQ76 ++ rm /tmp/tmp.43u8FRvUnd /tmp/tmp.C2N7rhoQ76 ++ return 0 + wait_pod 
percona-xtradb-cluster-operator-5699d7755d-rnk5w 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5699d7755d-rnk5w + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-5699d7755d-rnk5w ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace percona-xtradb-cluster-operator-5699d7755d-rnk5w.Ok + sleep 3 + create_namespace scaling-proxysql-12663 + local namespace=scaling-proxysql-12663 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + '[' '!' -z '' ']' + kubectl_bin delete namespace scaling-proxysql-12663 + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.aBAHdw9XuQ + local LAST_OUT=/tmp/tmp.pQj9ZSRyK3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.BTvCv3e8vY + local LAST_ERR=/tmp/tmp.r1VxPTMLev + local exit_status=0 + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + kubectl get ns + kubectl delete namespace scaling-proxysql-12663 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.pQj9ZSRyK3 + cat /tmp/tmp.BTvCv3e8vY + rm /tmp/tmp.pQj9ZSRyK3 /tmp/tmp.BTvCv3e8vY + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace scaling-proxysql-12663 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace scaling-proxysql-12663 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.aBAHdw9XuQ + cat /tmp/tmp.r1VxPTMLev Error from server (NotFound): namespaces "scaling-proxysql-12663" not found + rm /tmp/tmp.aBAHdw9XuQ /tmp/tmp.r1VxPTMLev + return 1 + : + wait_for_delete namespace/scaling-proxysql-12663 + local res=namespace/scaling-proxysql-12663 + set +o xtrace namespace/scaling-proxysql-12663 - Error from server (NotFound): namespaces "scaling-proxysql-12663" not found + kubectl_bin create namespace scaling-proxysql-12663 ++ mktemp + local LAST_OUT=/tmp/tmp.yUvpYYzySn ++ mktemp + local LAST_ERR=/tmp/tmp.6IIvY0EbyB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace scaling-proxysql-12663 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.yUvpYYzySn namespace/scaling-proxysql-12663 created + cat /tmp/tmp.6IIvY0EbyB + rm /tmp/tmp.yUvpYYzySn /tmp/tmp.6IIvY0EbyB + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.usxAX6bOy3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1x39afwGl4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.usxAX6bOy3 ++ cat /tmp/tmp.1x39afwGl4 ++ rm /tmp/tmp.usxAX6bOy3 /tmp/tmp.1x39afwGl4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=scaling-proxysql-12663 ++ mktemp + local LAST_OUT=/tmp/tmp.GNpR9QVsld ++ mktemp + local LAST_ERR=/tmp/tmp.6QHRKKHT0D + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=scaling-proxysql-12663 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GNpR9QVsld Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling" modified. 
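# Note: wait_pod (used just above for the operator pod, and throughout the rest
# of this log for the pxc/proxysql pods) polls a pod until it is ready, printing
# one dot per probe and "<pod>.Ok" on success. A simplified sketch of that
# behaviour, inferred from the trace (the readiness probe below checks the pod
# Ready condition; the real helper derives a container name from the pod name
# and may differ in detail):
wait_pod() {
    local pod=$1
    local max_retry=${2:-480}
    local ns=${3:-}
    local retry=0
    echo -n "$pod"
    until kubectl get pod ${ns:+-n "$ns"} "$pod" \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null \
        | grep -q True; do
        echo -n .
        sleep 1
        retry=$((retry + 1))
        if [[ $retry -ge $max_retry ]]; then
            echo " pod $pod did not become ready after $max_retry probes"
            return 1
        fi
    done
    echo .Ok
}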
+ cat /tmp/tmp.6QHRKKHT0D + rm /tmp/tmp.GNpR9QVsld /tmp/tmp.6QHRKKHT0D + return 0 + apply_secrets + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MPt3SRU5Jj ++ mktemp + local LAST_ERR=/tmp/tmp.rOFeD425AU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MPt3SRU5Jj secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.rOFeD425AU + rm /tmp/tmp.MPt3SRU5Jj /tmp/tmp.rOFeD425AU + return 0 + cluster=scaling-proxysql + spinup_pxc scaling-proxysql /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + local cluster=scaling-proxysql + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BMXmjdBbUt ++ mktemp + local LAST_ERR=/tmp/tmp.aMVFH5L0Gd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.BMXmjdBbUt secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.aMVFH5L0Gd + rm /tmp/tmp.BMXmjdBbUt /tmp/tmp.aMVFH5L0Gd + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scaling-proxysql-12663~ + local LAST_OUT=/tmp/tmp.oxslOXMrhW + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' ++ mktemp + 
local LAST_ERR=/tmp/tmp.TlmPgCtkc0 + local exit_status=0 + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.oxslOXMrhW deployment.apps/pxc-client created + cat /tmp/tmp.TlmPgCtkc0 + rm /tmp/tmp.oxslOXMrhW /tmp/tmp.TlmPgCtkc0 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + local LAST_OUT=/tmp/tmp.SDA9aY6CHp + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scaling-proxysql-12663~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_ERR=/tmp/tmp.UlLWu07ex0 + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.SDA9aY6CHp perconaxtradbcluster.pxc.percona.com/scaling-proxysql created + cat /tmp/tmp.UlLWu07ex0 + rm /tmp/tmp.SDA9aY6CHp /tmp/tmp.UlLWu07ex0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy scaling-proxysql ++ local target_cluster=scaling-proxysql +++ kubectl_bin get pxc scaling-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NEpR6SuirC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.unseeb3KNO +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc scaling-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.NEpR6SuirC +++ cat /tmp/tmp.unseeb3KNO +++ rm /tmp/tmp.NEpR6SuirC /tmp/tmp.unseeb3KNO +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc scaling-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AcEorCN9uI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7SDzgsqP6E +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc scaling-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ 
exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.AcEorCN9uI +++ cat /tmp/tmp.7SDzgsqP6E +++ rm /tmp/tmp.AcEorCN9uI /tmp/tmp.7SDzgsqP6E +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo scaling-proxysql-proxysql ++ return + local proxy=scaling-proxysql-proxysql + wait_for_running scaling-proxysql-proxysql 1 + local name=scaling-proxysql-proxysql + let last_pod=0 + : + local max_retry=480 ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-proxysql-0 480 + local pod=scaling-proxysql-proxysql-0 + local max_retry=480 + local ns= ++ echo scaling-proxysql-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace scaling-proxysql-proxysql-0........Ok + wait_for_running scaling-proxysql-pxc 3 + local name=scaling-proxysql-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-pxc-0 480 + local pod=scaling-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo scaling-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace scaling-proxysql-pxc-0...........................Ok + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-pxc-1 480 + local pod=scaling-proxysql-pxc-1 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo scaling-proxysql-pxc-1 + local container=pxc + set +o xtrace scaling-proxysql-pxc-1......................................Ok + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-pxc-2 480 + local pod=scaling-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo scaling-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace scaling-proxysql-pxc-2...............................Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h scaling-proxysql-proxysql -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h scaling-proxysql-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p0Q4DOUvXq +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Z6gz46xJj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.p0Q4DOUvXq ++ cat /tmp/tmp.3Z6gz46xJj ++ rm /tmp/tmp.p0Q4DOUvXq /tmp/tmp.3Z6gz46xJj ++ return 0 + client_pod=pxc-client-5d749ff8b6-dx9b2 + wait_pod pxc-client-5d749ff8b6-dx9b2 + local pod=pxc-client-5d749ff8b6-dx9b2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-5d749ff8b6-dx9b2 + local container= + set +o xtrace pxc-client-5d749ff8b6-dx9b2.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h scaling-proxysql-proxysql -uroot -proot_password' + local 
'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h scaling-proxysql-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.egIjhRzSf8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.F6frrKHRyR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.egIjhRzSf8 ++ cat /tmp/tmp.F6frrKHRyR ++ rm /tmp/tmp.egIjhRzSf8 /tmp/tmp.F6frrKHRyR ++ return 0 + client_pod=pxc-client-5d749ff8b6-dx9b2 + wait_pod pxc-client-5d749ff8b6-dx9b2 + local pod=pxc-client-5d749ff8b6-dx9b2 + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-dx9b2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-dx9b2.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-0.scaling-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-0.scaling-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-0.scaling-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-0.scaling-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3qobIJuFFs +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q0wnVHLi3E ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.3qobIJuFFs ++ cat /tmp/tmp.Q0wnVHLi3E ++ rm /tmp/tmp.3qobIJuFFs /tmp/tmp.Q0wnVHLi3E ++ return 0 + client_pod=pxc-client-5d749ff8b6-dx9b2 + wait_pod pxc-client-5d749ff8b6-dx9b2 + local pod=pxc-client-5d749ff8b6-dx9b2 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-dx9b2 + local container= + set +o xtrace pxc-client-5d749ff8b6-dx9b2.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.030vUcOiNJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql /tmp/tmp.030vUcOiNJ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-1.scaling-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-1.scaling-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-1.scaling-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-1.scaling-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZeZ7DsNmiN +++ mktemp ++ local LAST_ERR=/tmp/tmp.8QH8w7K83f ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ZeZ7DsNmiN ++ cat /tmp/tmp.8QH8w7K83f ++ rm /tmp/tmp.ZeZ7DsNmiN /tmp/tmp.8QH8w7K83f ++ return 0 + client_pod=pxc-client-5d749ff8b6-dx9b2 + wait_pod pxc-client-5d749ff8b6-dx9b2 + local pod=pxc-client-5d749ff8b6-dx9b2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-dx9b2 ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-dx9b2.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.030vUcOiNJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql /tmp/tmp.030vUcOiNJ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-2.scaling-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-2.scaling-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h scaling-proxysql-pxc-2.scaling-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scaling-proxysql-pxc-2.scaling-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HmovLV1yTR +++ mktemp ++ local LAST_ERR=/tmp/tmp.viQgEMRAjM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.HmovLV1yTR ++ cat /tmp/tmp.viQgEMRAjM ++ rm /tmp/tmp.HmovLV1yTR /tmp/tmp.viQgEMRAjM ++ return 0 + client_pod=pxc-client-5d749ff8b6-dx9b2 + wait_pod pxc-client-5d749ff8b6-dx9b2 + local pod=pxc-client-5d749ff8b6-dx9b2 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-dx9b2 + local container= + set +o xtrace pxc-client-5d749ff8b6-dx9b2.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.030vUcOiNJ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/select-1.sql /tmp/tmp.030vUcOiNJ/select-1.sql ++ is_keyring_plugin_in_use scaling-proxysql ++ local cluster=scaling-proxysql ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ kubectl_bin exec -it scaling-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zNmXKUVUHR +++ mktemp ++ local LAST_ERR=/tmp/tmp.nDCBDT2WAx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec -it scaling-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.zNmXKUVUHR ++ cat /tmp/tmp.nDCBDT2WAx Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.zNmXKUVUHR /tmp/tmp.nDCBDT2WAx ++ return 0 + '[' '' ']' + desc 'scale up from 1 to 3' + set +o xtrace ----------------------------------------------------------------------------------- scale up from 1 to 3 ----------------------------------------------------------------------------------- + sed -e 's/size: 1/size: 3/' + kubectl_bin apply -f- + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9z0RVf8I8u + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.NhNOC0DWJ7 + local exit_status=0 + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scaling-proxysql-12663~ + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f- + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.9z0RVf8I8u perconaxtradbcluster.pxc.percona.com/scaling-proxysql configured + cat /tmp/tmp.NhNOC0DWJ7 + rm /tmp/tmp.9z0RVf8I8u /tmp/tmp.NhNOC0DWJ7 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running scaling-proxysql-proxysql 3 + local name=scaling-proxysql-proxysql + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-proxysql-0 480 + local pod=scaling-proxysql-proxysql-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo scaling-proxysql-proxysql-0 + local container=proxysql + set +o 
xtrace scaling-proxysql-proxysql-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-proxysql-1 480 + local pod=scaling-proxysql-proxysql-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo scaling-proxysql-proxysql-1 + local container=proxysql + set +o xtrace scaling-proxysql-proxysql-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scaling-proxysql-proxysql-2 480 + local pod=scaling-proxysql-proxysql-2 + local max_retry=480 + local ns= ++ echo scaling-proxysql-proxysql-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace scaling-proxysql-proxysql-2......Ok + sleep 25 + desc 'check if PVC created' + set +o xtrace ----------------------------------------------------------------------------------- check if PVC created ----------------------------------------------------------------------------------- + compare_kubectl pvc/proxydata-scaling-proxysql-proxysql-1 + local resource=pvc/proxydata-scaling-proxysql-proxysql-1 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-1.yml + local new_result=/tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-1.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-1-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-1-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-1-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml pvc/proxydata-scaling-proxysql-proxysql-1 + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.finalizers' + yq d - '**."percona.com/*"' + yq d - '**.volumeName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.dataSource' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.healthCheckNodePort' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.nodePort' + yq d - '**.clusterIP' + yq d - '**.imagePullSecrets' + yq d - '**.clusterIPs' + yq d - '**.enableServiceLinks' + yq d - '**.storageClassName' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - '**.procMount' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==suffix)' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - spec.ipFamilyPolicy + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'spec.volumeClaimTemplates.*.kind' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d 
- '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.ownerReferences.*.apiVersion' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/scaling-proxysql-12663/namespace/g + yq d - '**.creationTimestamp' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.DRWRDHR4Hs ++ mktemp + local LAST_ERR=/tmp/tmp.T3t29VZwxp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pvc/proxydata-scaling-proxysql-proxysql-1 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.DRWRDHR4Hs + cat /tmp/tmp.T3t29VZwxp + rm /tmp/tmp.DRWRDHR4Hs /tmp/tmp.T3t29VZwxp + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-1.yml /tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-1.yml + compare_kubectl pvc/proxydata-scaling-proxysql-proxysql-2 + local resource=pvc/proxydata-scaling-proxysql-proxysql-2 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2.yml + local new_result=/tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-2.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ bc -l ++ echo '1.20 >= 1.21' + '[' 0 -eq 1 ']' + return 1 + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**.nodePort' + yq d - '**.imagePullSecrets' + yq d - spec.volumeMode + kubectl_bin get -o yaml pvc/proxydata-scaling-proxysql-proxysql-2 + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - status + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.volumeName' + yq d - spec.nodeName + yq d - '**.enableServiceLinks' + yq d - '**.(name==suffix)' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.deletionTimestamp + yq d - metadata.selfLink ++ mktemp + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - '**.namespace' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.preemptionPolicy' + yq d - 
'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilies + yq d - '**.controller-uid' + yq d - spec.ipFamilyPolicy + yq d - '**.creationTimestamp' + /usr/bin/sed s/scaling-proxysql-12663/namespace/g + local LAST_OUT=/tmp/tmp.qW3YSaXvfw + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - metadata.managedFields ++ mktemp + local LAST_ERR=/tmp/tmp.vZCTcisN8L + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pvc/proxydata-scaling-proxysql-proxysql-2 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.qW3YSaXvfw + cat /tmp/tmp.vZCTcisN8L + rm /tmp/tmp.qW3YSaXvfw /tmp/tmp.vZCTcisN8L + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2.yml /tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-2.yml + desc 'check new Pods exists in ProxySQL' + set +o xtrace ----------------------------------------------------------------------------------- check new Pods exists in ProxySQL ----------------------------------------------------------------------------------- + pod0=scaling-proxysql-proxysql-0 + pod1=scaling-proxysql-proxysql-1 + pod2=scaling-proxysql-proxysql-2 + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-0 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + grep scaling-proxysql-proxysql-0 + local pod=scaling-proxysql-proxysql-0 + local container_name= + set +o xtrace scaling-proxysql-proxysql-0.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-0 + grep scaling-proxysql-proxysql-1 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-0 + local container_name= + set +o xtrace scaling-proxysql-proxysql-1.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + grep scaling-proxysql-proxysql-2 + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-0 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-0 + local container_name= + set +o xtrace scaling-proxysql-proxysql-2.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + grep scaling-proxysql-proxysql-0 + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-1 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-1 + local container_name= + set +o xtrace scaling-proxysql-proxysql-0.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + grep scaling-proxysql-proxysql-1 + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-1 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local 
pod=scaling-proxysql-proxysql-1 + local container_name= + set +o xtrace scaling-proxysql-proxysql-1.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-1 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-1 + local container_name= + set +o xtrace + grep scaling-proxysql-proxysql-2 scaling-proxysql-proxysql-2.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-2 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-2 + local container_name= + set +o xtrace + grep scaling-proxysql-proxysql-0 scaling-proxysql-proxysql-0.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-2 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + grep scaling-proxysql-proxysql-1 + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-2 + local container_name= + set +o xtrace scaling-proxysql-proxysql-1.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-2 + local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + local pod=scaling-proxysql-proxysql-2 + local container_name= + set +o xtrace + grep scaling-proxysql-proxysql-2 scaling-proxysql-proxysql-2.scaling-proxysql-proxysql-unready.scaling-proxysql-12663.svc.cluster.local + desc 'scale down from 3 to 1' + set +o xtrace ----------------------------------------------------------------------------------- scale down from 3 to 1 ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/conf/scaling-proxysql.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scaling-proxysql-12663~ + /usr/bin/sed -e 
's#apply:.*#apply: Never#' + local LAST_OUT=/tmp/tmp.ZZu3nBZFEP + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + local LAST_ERR=/tmp/tmp.r9dJ5UdE1J + local exit_status=0 + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ZZu3nBZFEP perconaxtradbcluster.pxc.percona.com/scaling-proxysql configured + cat /tmp/tmp.r9dJ5UdE1J + rm /tmp/tmp.ZZu3nBZFEP /tmp/tmp.r9dJ5UdE1J + return 0 + desc 'check if Pod deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod deleted ----------------------------------------------------------------------------------- + wait_for_delete pod/scaling-proxysql-proxysql-2 + local res=pod/scaling-proxysql-proxysql-2 + set +o xtrace pod/scaling-proxysql-proxysql-2 - .Error from server (NotFound): pods "scaling-proxysql-proxysql-2" not found + desc 'check if PVC not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if PVC not deleted ----------------------------------------------------------------------------------- + compare_kubectl pvc/proxydata-scaling-proxysql-proxysql-2 + local resource=pvc/proxydata-scaling-proxysql-proxysql-2 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2.yml + local new_result=/tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-2.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml pvc/proxydata-scaling-proxysql-proxysql-2 + yq d - '**.creationTimestamp' ++ mktemp + local LAST_OUT=/tmp/tmp.V5nxNqAr5J + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' ++ mktemp + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.dataSource' + yq d - spec.nodeName + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - spec.volumeMode + local LAST_ERR=/tmp/tmp.kmBuaHolBI + local exit_status=0 + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.procMount' + yq d - '**."percona.com/*"' + yq d - '**.storageClassName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.finalizers' + yq d - '**.volumeName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.nodePort' + yq d - '**.(name==NAMESPACE)' + yq d - status + yq d - '**.enableServiceLinks' + yq d - '**.(name==suffix)' + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.resourceVersion ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pvc/proxydata-scaling-proxysql-proxysql-2 + yq d - '**.uid' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilies + yq d - '**.preemptionPolicy' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - spec.ipFamilyPolicy + yq d - '**.controller-uid' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.namespace' + /usr/bin/sed s/scaling-proxysql-12663/namespace/g + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.V5nxNqAr5J + cat /tmp/tmp.kmBuaHolBI + rm /tmp/tmp.V5nxNqAr5J /tmp/tmp.kmBuaHolBI + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/scaling-proxysql/compare/pvc_proxydata-scaling-proxysql-proxysql-2.yml /tmp/tmp.030vUcOiNJ/pvc_proxydata-scaling-proxysql-proxysql-2.yml + desc 'check if Pod deleted from ProxySQL' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod deleted from ProxySQL ----------------------------------------------------------------------------------- + sleep 30 + run_mysql_local 'SELECT hostname FROM runtime_proxysql_servers;' '-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' scaling-proxysql-proxysql-0 + 
local 'command=SELECT hostname FROM runtime_proxysql_servers;' + local 'uri=-h127.0.0.1 -P6032 -uproxyadmin -padmin_password' + grep scaling-proxysql-proxysql-2 + local pod=scaling-proxysql-proxysql-0 + local container_name= + set +o xtrace + : + destroy scaling-proxysql-12663 + local namespace=scaling-proxysql-12663 + local ignore_logs=false + [[ false == \f\a\l\s\e ]] + grep -v 'the object has been modified' + grep -v level=info ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.030vUcOiNJ/operator.log ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.4jf5AhHa56 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LB2i17x9OH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.4jf5AhHa56 ++ cat /tmp/tmp.LB2i17x9OH ++ rm /tmp/tmp.4jf5AhHa56 /tmp/tmp.LB2i17x9OH ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-rnk5w ++ mktemp + local LAST_OUT=/tmp/tmp.S21nJliB8f ++ mktemp + local LAST_ERR=/tmp/tmp.t386CGLInX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-rnk5w + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.S21nJliB8f + cat /tmp/tmp.t386CGLInX + rm /tmp/tmp.S21nJliB8f /tmp/tmp.t386CGLInX + return 0 I0412 09:34:03.149325 1 request.go:645] Throttling request took 1.043706013s, request: GET:https://10.123.240.1:443/apis/pxc.percona.com/v1?timeout=32s {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: command terminated with exit code 1 / / ERROR (line:633) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n","errorVerbose":"exec syncusers: command terminated with exit code 1 / / ERROR (line:633) : The cluster (with writer hostgroup:11) has not been configured in ProxySQL\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: not found"} {"level":"info",,"caller":"pxc/version.go:328","msg":"update PXC version (fetched from db)","new version":"8.0.27-18.1"} {"level":"info",,"caller":"v1/pxc_types.go:874","msg":"ProxySQL size will be changed 
from 1 to 2 due to safe config"} {"level":"info",,"caller":"v1/pxc_types.go:875","msg":"Set allowUnsafeConfigurations=true to disable safe configuration"} {"level":"info",,"logger":"cmd","msg":"Git commit: 706f792ae47c369cb3556faff186b6873a8a247f Git branch: PR-1125-706f792a Build time: 2022-04-12T09:09:41Z"} {"level":"info",,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} {"level":"info",,"logger":"cmd","msg":"Go Version: go1.17.8"} {"level":"info",,"logger":"cmd","msg":"operator-sdk Version: v0.19.4"} {"level":"info",,"logger":"cmd","msg":"Registering Components."} {"level":"info",,"logger":"cmd","msg":"Runs on","platform":"kubernetes","version":"v1.20.15-gke.4100"} {"level":"info",,"logger":"cmd","msg":"Starting the Cmd."} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} {"level":"info",,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"registering webhook","path":"/validate-percona-xtradbcluster"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"serving webhook server","host":"","port":9443} {"level":"info",,"logger":"controller-runtime.webhook.webhooks","msg":"starting webhook server"} {"level":"info",,"logger":"leader","msg":"Became the leader."} {"level":"info",,"logger":"leader","msg":"No pre-existing lock was found."} {"level":"info",,"logger":"leader","msg":"Trying to become the leader."} + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get pxc --all-namespaces -o wide + kubectl patch pxc -n scaling-proxysql-12663 scaling-proxysql --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/scaling-proxysql patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.UZW6R3qKqh ++ mktemp + local LAST_ERR=/tmp/tmp.ktC0JH9buV + local exit_status=0 ++ seq 0 2 + for i in 
'$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.UZW6R3qKqh perconaxtradbcluster.pxc.percona.com "scaling-proxysql" deleted + cat /tmp/tmp.ktC0JH9buV + rm /tmp/tmp.UZW6R3qKqh /tmp/tmp.ktC0JH9buV + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.JX0cFznr1c ++ mktemp + local LAST_ERR=/tmp/tmp.Ygri9rSxWn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.JX0cFznr1c No resources found + cat /tmp/tmp.Ygri9rSxWn + rm /tmp/tmp.JX0cFznr1c /tmp/tmp.Ygri9rSxWn + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.U67OzgtNQf ++ mktemp + local LAST_ERR=/tmp/tmp.a6tEd6vPwh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.U67OzgtNQf No resources found + cat /tmp/tmp.a6tEd6vPwh + rm /tmp/tmp.U67OzgtNQf /tmp/tmp.a6tEd6vPwh + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.xjI2TRLjmB ++ mktemp + local LAST_ERR=/tmp/tmp.3KaQltn2nT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.xjI2TRLjmB validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.3KaQltn2nT + rm /tmp/tmp.xjI2TRLjmB /tmp/tmp.3KaQltn2nT + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace scaling-proxysql-12663 + rm -rf /tmp/tmp.030vUcOiNJ + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.QNgINuoOMi ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.bfreuyeRBK + local LAST_ERR=/tmp/tmp.I00XEvrfLo + local exit_status=0 ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace scaling-proxysql-12663 + local LAST_ERR=/tmp/tmp.8ketYRO8RV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace pxc-operator
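# Note: the captured log ends here, during the forced deletion of the
# scaling-proxysql-12663 and pxc-operator namespaces. For reference, the
# scale-up/scale-down steps exercised above amount to changing
# spec.proxysql.size on the custom resource and checking ProxySQL's runtime
# server list; a hand-run equivalent (illustrative only, using the cluster name
# and proxyadmin password from this test run, not part of the test suite):

# scale ProxySQL from 1 to 3 pods
kubectl patch pxc scaling-proxysql --type=merge -p '{"spec":{"proxysql":{"size":3}}}'

# confirm all three proxysql pods registered with each other
kubectl exec scaling-proxysql-proxysql-0 -c proxysql -- \
    mysql -h127.0.0.1 -P6032 -uproxyadmin -padmin_password \
    -NBe 'SELECT hostname FROM runtime_proxysql_servers;'

# scale back down to 1; the pod is removed while its PVC is kept
kubectl patch pxc scaling-proxysql --type=merge -p '{"spec":{"proxysql":{"size":1}}}'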