Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/logs/auto-tuning-8-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + cluster=auto-tuning + create_infra auto-tuning-20749 + local ns=auto-tuning-20749 + '[' -n pxc-operator ']' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.8IO7lwVHva ++ mktemp + local LAST_ERR=/tmp/tmp.zZ9Q4C9fr4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8IO7lwVHva No resources found + cat /tmp/tmp.zZ9Q4C9fr4 + rm /tmp/tmp.8IO7lwVHva /tmp/tmp.zZ9Q4C9fr4 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.eAOQePari5 ++ mktemp + local LAST_ERR=/tmp/tmp.aX3OUP60Sr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eAOQePari5 No resources found + cat /tmp/tmp.aX3OUP60Sr + rm /tmp/tmp.eAOQePari5 /tmp/tmp.aX3OUP60Sr + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.yajnC6DuNN ++ mktemp + local LAST_ERR=/tmp/tmp.5B3VdIBWoU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yajnC6DuNN No resources found + cat /tmp/tmp.5B3VdIBWoU + rm /tmp/tmp.yajnC6DuNN /tmp/tmp.5B3VdIBWoU + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name 
was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' ++ mktemp + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.4YNIJteNjQ + awk '{print$1}' ++ mktemp + local LAST_ERR=/tmp/tmp.tYxjAG7vjt + local exit_status=0 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.4DXu9qLpBw + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.pR5ng5Wdin + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4YNIJteNjQ + cat /tmp/tmp.tYxjAG7vjt + rm /tmp/tmp.4YNIJteNjQ /tmp/tmp.tYxjAG7vjt + return 0 namespace "auto-tuning-5940" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4DXu9qLpBw namespace "pxc-operator" deleted + cat /tmp/tmp.pR5ng5Wdin + rm /tmp/tmp.4DXu9qLpBw /tmp/tmp.pR5ng5Wdin + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.lzDeMohrrm ++ mktemp + local LAST_ERR=/tmp/tmp.qeVAGtZfVm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lzDeMohrrm namespace/pxc-operator created + cat /tmp/tmp.qeVAGtZfVm + rm /tmp/tmp.lzDeMohrrm /tmp/tmp.qeVAGtZfVm + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Sh8PUMJ0HG +++ mktemp ++ local LAST_ERR=/tmp/tmp.FqEmFDlqB2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Sh8PUMJ0HG ++ cat /tmp/tmp.FqEmFDlqB2 ++ rm /tmp/tmp.Sh8PUMJ0HG /tmp/tmp.FqEmFDlqB2 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yZetRfjvtC ++ mktemp + local LAST_ERR=/tmp/tmp.VdQkiU8SOV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yZetRfjvtC Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2" modified. 
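# NOTE: every kubectl call in this log goes through the kubectl_bin wrapper,
# which is why the trace is full of mktemp/LAST_OUT/LAST_ERR lines. A minimal
# sketch of the wrapper, reconstructed from this trace alone (the canonical
# definition lives in the repo's test helpers, not shown here): capture
# stdout/stderr into temp files, retry up to three times, then replay both
# streams and return the last exit status.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    exit_status=0
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status != 0 ]; then sleep 0; else break; fi   # the trace shows "sleep 0", i.e. no backoff
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}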
+ cat /tmp/tmp.VdQkiU8SOV + rm /tmp/tmp.yZetRfjvtC /tmp/tmp.VdQkiU8SOV + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.b6m4A0Ckqe ++ mktemp + local LAST_ERR=/tmp/tmp.ueCdq5P5y2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.b6m4A0Ckqe customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.ueCdq5P5y2 + rm /tmp/tmp.b6m4A0Ckqe /tmp/tmp.ueCdq5P5y2 + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nXMQ9uNJQv ++ mktemp + local LAST_ERR=/tmp/tmp.5KErbaN7Zr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nXMQ9uNJQv clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.5KErbaN7Zr + rm /tmp/tmp.nXMQ9uNJQv /tmp/tmp.5KErbaN7Zr + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - ++ mktemp + local LAST_OUT=/tmp/tmp.pUtHObtkgl ++ mktemp + local LAST_ERR=/tmp/tmp.Wxkw7lazIk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pUtHObtkgl deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.Wxkw7lazIk + rm /tmp/tmp.pUtHObtkgl /tmp/tmp.Wxkw7lazIk + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.cOe416Ay4i ++ mktemp + local LAST_ERR=/tmp/tmp.yFJOYqMfIM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l 
app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cOe416Ay4i pod/percona-xtradb-cluster-operator-5f94b588fb-d2qln condition met + cat /tmp/tmp.yFJOYqMfIM + rm /tmp/tmp.cOe416Ay4i /tmp/tmp.yFJOYqMfIM + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.gLSancag4v +++ mktemp ++ local LAST_ERR=/tmp/tmp.p0ObZk01ds ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gLSancag4v ++ cat /tmp/tmp.p0ObZk01ds ++ rm /tmp/tmp.gLSancag4v /tmp/tmp.p0ObZk01ds ++ return 0 + wait_pod percona-xtradb-cluster-operator-5f94b588fb-d2qln 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5f94b588fb-d2qln + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-5f94b588fb-d2qln ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-5f94b588fb-d2qln condition met percona-xtradb-cluster-operator-5f94b588fb-d2qln.Ok + sleep 3 + create_namespace auto-tuning-20749 + local namespace=auto-tuning-20749 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v 
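# NOTE: the repeated "error: resource(s) were provided, but no name was
# specified" lines are benign. destroy_chaos_mesh feeds the output of
# `kubectl get ... | grep chaos-mesh | awk '{print $1}'` into `kubectl delete`;
# on a clean cluster grep matches nothing, delete is invoked with zero names
# and fails, and the failure is swallowed by the `:` no-op (the "+ :" lines).
# The pattern, shown for one of the resource kinds above:
timeout 30 kubectl delete MutatingWebhookConfiguration \
    $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :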
'^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces auto-tuning-20749' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces auto-tuning-20749 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace auto-tuning-20749 + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.f67DoGP7r6 ++ mktemp + kubectl_bin get ns + local LAST_ERR=/tmp/tmp.g7NK1DjATh + local exit_status=0 ++ seq 0 2 ++ mktemp + local LAST_OUT=/tmp/tmp.FcgyGxXhO8 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace auto-tuning-20749 + local LAST_ERR=/tmp/tmp.qpqAho1Nqw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace auto-tuning-20749 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FcgyGxXhO8 + cat /tmp/tmp.qpqAho1Nqw + rm /tmp/tmp.FcgyGxXhO8 /tmp/tmp.qpqAho1Nqw + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace auto-tuning-20749 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.f67DoGP7r6 + cat /tmp/tmp.g7NK1DjATh Error from server (NotFound): namespaces "auto-tuning-20749" not found + rm /tmp/tmp.f67DoGP7r6 /tmp/tmp.g7NK1DjATh + return 1 + : + wait_for_delete namespace/auto-tuning-20749 + local res=namespace/auto-tuning-20749 + echo -n 'namespace/auto-tuning-20749 - ' namespace/auto-tuning-20749 - + set +o xtrace Error from server (NotFound): namespaces "auto-tuning-20749" not found + desc 'create namespace auto-tuning-20749' + set +o xtrace ----------------------------------------------------------------------------------- create namespace auto-tuning-20749 ----------------------------------------------------------------------------------- + kubectl_bin create namespace auto-tuning-20749 ++ mktemp + local LAST_OUT=/tmp/tmp.lltF5zbJYI ++ mktemp + local LAST_ERR=/tmp/tmp.CG9hBrq3jk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace auto-tuning-20749 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lltF5zbJYI namespace/auto-tuning-20749 created + cat /tmp/tmp.CG9hBrq3jk + rm /tmp/tmp.lltF5zbJYI /tmp/tmp.CG9hBrq3jk + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.53PEPSTvWM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hf6F7pIaDf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.53PEPSTvWM ++ cat /tmp/tmp.Hf6F7pIaDf ++ rm /tmp/tmp.53PEPSTvWM /tmp/tmp.Hf6F7pIaDf ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2 --namespace=auto-tuning-20749 ++ mktemp + local LAST_OUT=/tmp/tmp.PQKadv3YMz ++ mktemp + local LAST_ERR=/tmp/tmp.2Uf1Y3OhPs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2 --namespace=auto-tuning-20749 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PQKadv3YMz Context 
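# NOTE: namespace cleanup runs two things concurrently here, which is why
# their xtrace output interleaves: a blanket sweep of non-system namespaces
# and a targeted delete of the test namespace. The sweep, as traced:
kubectl get ns \
    | egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' \
    | awk '{print$1}' \
    | xargs kubectl delete ns
# '^default$' can never match a full `kubectl get ns` row (the line also
# carries STATUS and AGE columns), so "default" survives the filter and
# kubectl answers with the Forbidden error seen above; the script tolerates
# that, and the NotFound for the not-yet-created namespace, via the ":" no-op.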
"gke_cloud-dev-112233_us-central1-a_jen-pxc-1709-788cbf69-3-cluster2" modified. + cat /tmp/tmp.2Uf1Y3OhPs + rm /tmp/tmp.PQKadv3YMz /tmp/tmp.2Uf1Y3OhPs + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dArBa13Cod ++ mktemp + local LAST_ERR=/tmp/tmp.iEQmNHQotx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dArBa13Cod secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.iEQmNHQotx + rm /tmp/tmp.dArBa13Cod /tmp/tmp.iEQmNHQotx + return 0 + spinup_pxc auto-tuning /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-limits.yml + local cluster=auto-tuning + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-limits.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.5kmgUd5DRm ++ mktemp + local LAST_ERR=/tmp/tmp.MseiM0L639 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5kmgUd5DRm secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.MseiM0L639 + rm /tmp/tmp.5kmgUd5DRm /tmp/tmp.MseiM0L639 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 
's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ mktemp + local LAST_OUT=/tmp/tmp.vr7F5mAecE ++ mktemp + local LAST_ERR=/tmp/tmp.Azv8Wiz0lp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vr7F5mAecE deployment.apps/pxc-client created + cat /tmp/tmp.Azv8Wiz0lp + rm /tmp/tmp.vr7F5mAecE /tmp/tmp.Azv8Wiz0lp + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-limits.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-limits.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-limits.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.VyBeP7SnFE + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_ERR=/tmp/tmp.LgSotdVubj + local exit_status=0 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VyBeP7SnFE perconaxtradbcluster.pxc.percona.com/auto-tuning created + cat /tmp/tmp.LgSotdVubj + rm /tmp/tmp.VyBeP7SnFE /tmp/tmp.LgSotdVubj + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy auto-tuning ++ local target_cluster=auto-tuning +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.47rylz88yh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.utqT3aeYuO +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.47rylz88yh +++ cat /tmp/tmp.utqT3aeYuO +++ rm /tmp/tmp.47rylz88yh 
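# NOTE: every manifest (client.yml above, and the auto-tuning CR variants
# later) is piped through cat_config, a sed chain that pins images to the
# build under test before `kubectl apply -f -`. Condensed from the trace;
# $config_file stands in for the manifest path:
cat "$config_file" \
    | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
    | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' \
    | /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
    | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
    | /usr/bin/sed -e 's~minio-service.#namespace~minio-service.auto-tuning-20749~' \
    | kubectl apply -f -
# (The full chain also rewrites the haproxy, backup, pmm, and logcollector
# images the same way, as the traced sed invocations show.)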
/tmp/tmp.utqT3aeYuO +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3nFpsNv4u7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6eeh6CygbC +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.3nFpsNv4u7 +++ cat /tmp/tmp.6eeh6CygbC +++ rm /tmp/tmp.3nFpsNv4u7 /tmp/tmp.6eeh6CygbC +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo auto-tuning-proxysql ++ return + local proxy=auto-tuning-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n auto-tuning-20749 ++ mktemp + local LAST_OUT=/tmp/tmp.BfslOan3lU ++ mktemp + local LAST_ERR=/tmp/tmp.DQXoRTp4jX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n auto-tuning-20749 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n auto-tuning-20749 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n auto-tuning-20749 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.BfslOan3lU + cat /tmp/tmp.DQXoRTp4jX error: no matching resources found + rm /tmp/tmp.BfslOan3lU /tmp/tmp.DQXoRTp4jX + return 1 + true + wait_for_running auto-tuning-proxysql 1 + local name=auto-tuning-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod auto-tuning-proxysql-0 480 + local pod=auto-tuning-proxysql-0 + local max_retry=480 + local ns= ++ echo auto-tuning-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace pod/auto-tuning-proxysql-0 condition met auto-tuning-proxysql-0.Ok + wait_for_running auto-tuning-pxc 3 + local name=auto-tuning-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod auto-tuning-pxc-0 480 + local pod=auto-tuning-pxc-0 + local max_retry=480 + local ns= ++ echo auto-tuning-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/auto-tuning-pxc-0 condition met auto-tuning-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod auto-tuning-pxc-1 480 + local pod=auto-tuning-pxc-1 + local max_retry=480 + local ns= ++ echo auto-tuning-pxc-1 ++ /usr/bin/sed 
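# NOTE: the failed `kubectl wait ... app.kubernetes.io/instance=monitoring`
# above ("no matching resources found", three attempts) is expected noise: no
# monitoring pods exist in this test, and the caller masks the failure with
# "+ true". wait_pod then derives which container's readiness to report from
# the pod name itself:
container=$(echo "$pod" \
    | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' \
    | egrep '^(pxc|proxysql)$' || true)
# auto-tuning-pxc-0 -> "pxc", auto-tuning-proxysql-0 -> "proxysql"; client
# pods such as pxc-client-... reduce to "" (no dedicated container).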
-E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/auto-tuning-pxc-1 condition met auto-tuning-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod auto-tuning-pxc-2 480 + local pod=auto-tuning-pxc-2 + local max_retry=480 + local ns= ++ echo auto-tuning-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/auto-tuning-pxc-2 condition met auto-tuning-pxc-2.Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h auto-tuning-proxysql -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h auto-tuning-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6OxpMb4lm1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nCHBPXrjyb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6OxpMb4lm1 ++ cat /tmp/tmp.nCHBPXrjyb ++ rm /tmp/tmp.6OxpMb4lm1 /tmp/tmp.nCHBPXrjyb ++ return 0 + client_pod=pxc-client-6644d8898f-nsz65 + wait_pod pxc-client-6644d8898f-nsz65 + local pod=pxc-client-6644d8898f-nsz65 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-nsz65 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h auto-tuning-proxysql -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h auto-tuning-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BPWMVzXleq +++ mktemp ++ local LAST_ERR=/tmp/tmp.hMwxBdFFze ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BPWMVzXleq ++ cat /tmp/tmp.hMwxBdFFze ++ rm /tmp/tmp.BPWMVzXleq /tmp/tmp.hMwxBdFFze ++ return 0 + client_pod=pxc-client-6644d8898f-nsz65 + wait_pod pxc-client-6644d8898f-nsz65 + local pod=pxc-client-6644d8898f-nsz65 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-nsz65 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-0.auto-tuning-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-0.auto-tuning-pxc -uroot 
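# NOTE: run_mysql executes a statement from inside the pxc-client deployment's
# pod. Its body is hidden behind "set +o xtrace", so this is a sketch under
# the assumption that it pipes the statement into the mysql CLI via kubectl
# exec, consistent with the wrapper pattern above:
run_mysql() {
    local command=$1 uri=$2
    local client_pod
    client_pod=$(kubectl get pods --selector=name=pxc-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client_pod" -- bash -c "printf '%s\n' \"$command\" | mysql -sN $uri"
}
# e.g. run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' \
#          '-h auto-tuning-proxysql -uroot -proot_password -P3306'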
-proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-0.auto-tuning-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-0.auto-tuning-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.boGJh46Tgn +++ mktemp ++ local LAST_ERR=/tmp/tmp.GgGuw4me71 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.boGJh46Tgn ++ cat /tmp/tmp.GgGuw4me71 ++ rm /tmp/tmp.boGJh46Tgn /tmp/tmp.GgGuw4me71 ++ return 0 + client_pod=pxc-client-6644d8898f-nsz65 + wait_pod pxc-client-6644d8898f-nsz65 + local pod=pxc-client-6644d8898f-nsz65 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-nsz65 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.xsZGDOMGgG/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql /tmp/tmp.xsZGDOMGgG/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-1.auto-tuning-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-1.auto-tuning-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-1.auto-tuning-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-1.auto-tuning-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gn5266ODxg +++ mktemp ++ local LAST_ERR=/tmp/tmp.HU0dxT0wkX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gn5266ODxg ++ cat /tmp/tmp.HU0dxT0wkX ++ rm /tmp/tmp.gn5266ODxg /tmp/tmp.HU0dxT0wkX ++ return 0 + client_pod=pxc-client-6644d8898f-nsz65 + wait_pod pxc-client-6644d8898f-nsz65 + local pod=pxc-client-6644d8898f-nsz65 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-nsz65 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.xsZGDOMGgG/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql /tmp/tmp.xsZGDOMGgG/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-2.auto-tuning-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-2.auto-tuning-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h auto-tuning-pxc-2.auto-tuning-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h auto-tuning-pxc-2.auto-tuning-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.njdvmEntiY +++ mktemp ++ local LAST_ERR=/tmp/tmp.7VXzeClkoZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.njdvmEntiY ++ cat /tmp/tmp.7VXzeClkoZ ++ rm /tmp/tmp.njdvmEntiY /tmp/tmp.7VXzeClkoZ ++ return 0 + client_pod=pxc-client-6644d8898f-nsz65 + wait_pod pxc-client-6644d8898f-nsz65 + local pod=pxc-client-6644d8898f-nsz65 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-nsz65 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok + set +o xtrace + '[' '!' 
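# NOTE: compare_mysql_cmd (run once per pxc pod in this section) captures the
# query result and diffs it against a golden file, preferring a *-80.sql
# variant when the PXC image is 8.0. A sketch; $test_dir, $tmp_dir, and
# $IMAGE_PXC are stand-in names for the helper's real variables:
compare_mysql_cmd() {
    local command_id=$1 command=$2 uri=$3
    local expected=$test_dir/compare/$command_id.sql
    if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f "${expected%.sql}-80.sql" ]; then
        expected=${expected%.sql}-80.sql
    fi
    run_mysql "$command" "$uri" >"$tmp_dir/$command_id.sql"
    [ -s "$tmp_dir/$command_id.sql" ]          # fail on an empty result
    diff -u "$expected" "$tmp_dir/$command_id.sql"
}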
-s /tmp/tmp.xsZGDOMGgG/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/compare/select-1.sql /tmp/tmp.xsZGDOMGgG/select-1.sql ++ is_keyring_plugin_in_use auto-tuning ++ local cluster=auto-tuning ++ kubectl_bin exec -it auto-tuning-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b2y7kemvAd +++ mktemp ++ local LAST_ERR=/tmp/tmp.iFzLX3EJwv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it auto-tuning-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.b2y7kemvAd ++ cat /tmp/tmp.iFzLX3EJwv Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.b2y7kemvAd /tmp/tmp.iFzLX3EJwv ++ return 0 + '[' '' ']' ++ run_mysql 'SELECT @@innodb_buffer_pool_size;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@innodb_buffer_pool_size;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YpQnOLU33q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BgksTcZecC +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.YpQnOLU33q +++ cat /tmp/tmp.BgksTcZecC +++ rm /tmp/tmp.YpQnOLU33q /tmp/tmp.BgksTcZecC +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ egrep '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + INNODB_SIZE=536870912 ++ run_mysql 'SELECT @@max_connections;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@max_connections;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t42ot188yC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C1d5yKuPLM +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.t42ot188yC +++ cat /tmp/tmp.C1d5yKuPLM +++ rm /tmp/tmp.t42ot188yC /tmp/tmp.C1d5yKuPLM +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + CONNECTIONS=85 + desc 'get cm_buffer_size' + set +o xtrace ----------------------------------------------------------------------------------- get cm_buffer_size ----------------------------------------------------------------------------------- ++ get_variable_from_cm innodb_buffer_pool_size ++ kubectl_bin get configmap auto-auto-tuning-pxc 
-o yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.5bsFHFTuWM +++ mktemp ++ local LAST_ERR=/tmp/tmp.94BsglItC2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get configmap auto-auto-tuning-pxc -o yaml ++ awk '{print $3}' ++ grep -oE 'innodb_buffer_pool_size = [0-9]+' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5bsFHFTuWM ++ cat /tmp/tmp.94BsglItC2 ++ rm /tmp/tmp.5bsFHFTuWM /tmp/tmp.94BsglItC2 ++ return 0 + cm_buffer_size=536870912 + [[ 536870912 != 536870912 ]] + desc 'get cm_max_connections' + set +o xtrace ----------------------------------------------------------------------------------- get cm_max_connections ----------------------------------------------------------------------------------- ++ get_variable_from_cm max_connections ++ grep -oE 'max_connections = [0-9]+' ++ awk '{print $3}' ++ kubectl_bin get configmap auto-auto-tuning-pxc -o yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.gFGaM1T4zA +++ mktemp ++ local LAST_ERR=/tmp/tmp.6v9Penefit ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get configmap auto-auto-tuning-pxc -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gFGaM1T4zA ++ cat /tmp/tmp.6v9Penefit ++ rm /tmp/tmp.gFGaM1T4zA /tmp/tmp.6v9Penefit ++ return 0 + cm_max_connections=85 + [[ 85 != 85 ]] + desc 'with-requests: apply config and wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- with-requests: apply config and wait cluster consistency ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-requests.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-requests.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-requests.yml + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + local LAST_OUT=/tmp/tmp.eCR5q2LkkR ++ mktemp + local LAST_ERR=/tmp/tmp.tkYkHNlHV6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eCR5q2LkkR perconaxtradbcluster.pxc.percona.com/auto-tuning configured + cat /tmp/tmp.tkYkHNlHV6 + rm /tmp/tmp.eCR5q2LkkR /tmp/tmp.tkYkHNlHV6 + return 0 + wait_cluster_consistency 
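# NOTE: the checks just above are the heart of the with-limits case: the
# values MySQL actually runs with (INNODB_SIZE=536870912, i.e. 512MiB, and
# CONNECTIONS=85) must equal what the operator auto-tuned into the generated
# ConfigMap auto-auto-tuning-pxc, and both "[[ ... != ... ]]" guards passed.
# The ConfigMap side is read as traced:
get_variable_from_cm() {
    kubectl get configmap auto-auto-tuning-pxc -o yaml \
        | grep -oE "$1 = [0-9]+" \
        | awk '{print $3}'
}
cm_buffer_size=$(get_variable_from_cm innodb_buffer_pool_size)   # 536870912
cm_max_connections=$(get_variable_from_cm max_connections)       # 85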
auto-tuning 3 + local cluster_name=auto-tuning + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size auto-tuning ++ local cluster=auto-tuning +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.48IjvXeKhS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sYdwbZJz51 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.48IjvXeKhS +++ cat /tmp/tmp.sYdwbZJz51 +++ rm /tmp/tmp.48IjvXeKhS /tmp/tmp.sYdwbZJz51 +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xsETn8rYTV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SlOCOQzBA1 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.xsETn8rYTV +++ cat /tmp/tmp.SlOCOQzBA1 +++ rm /tmp/tmp.xsETn8rYTV /tmp/tmp.SlOCOQzBA1 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.brnPJgUYAk +++ mktemp ++ local LAST_ERR=/tmp/tmp.22jGoEvb7O ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.brnPJgUYAk ++ cat /tmp/tmp.22jGoEvb7O ++ rm /tmp/tmp.brnPJgUYAk /tmp/tmp.22jGoEvb7O ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aSy1f8rsUf +++ mktemp ++ local LAST_ERR=/tmp/tmp.zRLeGutTVq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aSy1f8rsUf ++ cat /tmp/tmp.zRLeGutTVq ++ rm /tmp/tmp.aSy1f8rsUf /tmp/tmp.zRLeGutTVq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sN1OOl3pnk +++ mktemp ++ local LAST_ERR=/tmp/tmp.COhod5tqZ9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sN1OOl3pnk ++ cat /tmp/tmp.COhod5tqZ9 ++ rm /tmp/tmp.sN1OOl3pnk /tmp/tmp.COhod5tqZ9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kpLhIGIfq4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XmPjY6lcQO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat 
/tmp/tmp.kpLhIGIfq4 ++ cat /tmp/tmp.XmPjY6lcQO ++ rm /tmp/tmp.kpLhIGIfq4 /tmp/tmp.XmPjY6lcQO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dkZSrvp3mU +++ mktemp ++ local LAST_ERR=/tmp/tmp.MZpJ4DRIF1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dkZSrvp3mU ++ cat /tmp/tmp.MZpJ4DRIF1 ++ rm /tmp/tmp.dkZSrvp3mU /tmp/tmp.MZpJ4DRIF1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uIuLPJFh2h +++ mktemp ++ local LAST_ERR=/tmp/tmp.7jPw79nftn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uIuLPJFh2h ++ cat /tmp/tmp.7jPw79nftn ++ rm /tmp/tmp.uIuLPJFh2h /tmp/tmp.7jPw79nftn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jxalq0lElV +++ mktemp ++ local LAST_ERR=/tmp/tmp.KIPtuvkDQV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jxalq0lElV ++ cat /tmp/tmp.KIPtuvkDQV ++ rm /tmp/tmp.jxalq0lElV /tmp/tmp.KIPtuvkDQV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r7HVIRdB1K +++ mktemp ++ local LAST_ERR=/tmp/tmp.rcFUiz09WT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.r7HVIRdB1K ++ cat /tmp/tmp.rcFUiz09WT ++ rm /tmp/tmp.r7HVIRdB1K /tmp/tmp.rcFUiz09WT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LMnlZcJij2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WTCVHirpo6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LMnlZcJij2 ++ cat /tmp/tmp.WTCVHirpo6 ++ rm /tmp/tmp.LMnlZcJij2 /tmp/tmp.WTCVHirpo6 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2AFrBbrDrp +++ mktemp ++ local LAST_ERR=/tmp/tmp.nDKax6r2MS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2AFrBbrDrp ++ cat 
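# NOTE: the long run of "waiting for cluster readyness" blocks (the echo text
# is quoted verbatim from the script) is wait_cluster_consistency polling the
# CR status after the with-requests config was applied; the same loop runs
# again after with-custom-config below. Its shape, from the trace:
i=0 max=36
sleep 7
while [[ $(kubectl get pxc auto-tuning -o 'jsonpath={.status.state}') != ready ]]; do
    echo 'waiting for cluster readyness'
    sleep 20
    [[ $i -ge $max ]] && exit 1
    let i+=1
done
# Once the state is ready, it additionally checks .status.pxc.ready and
# .status.proxysql.ready against the expected sizes (3 and 3 here).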
/tmp/tmp.nDKax6r2MS ++ rm /tmp/tmp.2AFrBbrDrp /tmp/tmp.nDKax6r2MS ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine auto-tuning +++ local cluster_name=auto-tuning ++++ get_proxy auto-tuning ++++ local target_cluster=auto-tuning +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.h7QPKlex57 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.rwvsLPOkTc +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.h7QPKlex57 +++++ cat /tmp/tmp.rwvsLPOkTc +++++ rm /tmp/tmp.h7QPKlex57 /tmp/tmp.rwvsLPOkTc +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.VOORS1OWNQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Laa8LyuP2a +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.VOORS1OWNQ +++++ cat /tmp/tmp.Laa8LyuP2a +++++ rm /tmp/tmp.VOORS1OWNQ /tmp/tmp.Laa8LyuP2a +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo auto-tuning-proxysql ++++ return +++ local cluster_proxy=auto-tuning-proxysql +++ echo proxysql ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RtRTlQJQxW +++ mktemp ++ local LAST_ERR=/tmp/tmp.hqHxR0JO90 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RtRTlQJQxW ++ cat /tmp/tmp.hqHxR0JO90 ++ rm /tmp/tmp.RtRTlQJQxW /tmp/tmp.hqHxR0JO90 ++ return 0 + [[ 3 == \3 ]] + does_autotune_cm_exists + local exit_status=0 + set +e + kubectl_bin get configmap auto-auto-tuning-pxc -o 'jsonpath={.metadata.name}' ++ mktemp + local LAST_OUT=/tmp/tmp.1yNYcA40wr ++ mktemp + local LAST_ERR=/tmp/tmp.2gjKsJPNPX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get configmap auto-auto-tuning-pxc -o 'jsonpath={.metadata.name}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl get configmap auto-auto-tuning-pxc -o 'jsonpath={.metadata.name}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl get configmap auto-auto-tuning-pxc -o 'jsonpath={.metadata.name}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.1yNYcA40wr + cat /tmp/tmp.2gjKsJPNPX Error from server (NotFound): configmaps "auto-auto-tuning-pxc" not found + rm /tmp/tmp.1yNYcA40wr /tmp/tmp.2gjKsJPNPX + return 1 + exit_status=1 + set -e + return 1 + desc 'with-custom-config: apply config and wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- with-custom-config: apply config and wait cluster consistency ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-custom-config.yml + '[' -z '' ']' + cat_config 
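# NOTE: does_autotune_cm_exists, just above, probes for the generated
# ConfigMap after the with-requests config: all three kubectl attempts get
# NotFound, so it returns 1 -- and since the test proceeds, a missing
# ConfigMap appears to be the expected outcome for this variant. Its shape,
# from the trace:
does_autotune_cm_exists() {
    local exit_status=0
    set +e
    kubectl_bin get configmap auto-auto-tuning-pxc -o 'jsonpath={.metadata.name}' >/dev/null
    exit_status=$?
    set -e
    return $exit_status
}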
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-custom-config.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-custom-config.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + local LAST_OUT=/tmp/tmp.rOBfX3ryEC + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ mktemp + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_ERR=/tmp/tmp.xCFneBkjYC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rOBfX3ryEC perconaxtradbcluster.pxc.percona.com/auto-tuning configured + cat /tmp/tmp.xCFneBkjYC + rm /tmp/tmp.rOBfX3ryEC /tmp/tmp.xCFneBkjYC + return 0 + wait_cluster_consistency auto-tuning 3 + local cluster_name=auto-tuning + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size auto-tuning ++ local cluster=auto-tuning +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZW86rRZZ2x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KMsPIXVzoV +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ZW86rRZZ2x +++ cat /tmp/tmp.KMsPIXVzoV +++ rm /tmp/tmp.ZW86rRZZ2x /tmp/tmp.KMsPIXVzoV +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8WodWHW9qE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sINHVQQZIi +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.8WodWHW9qE +++ cat /tmp/tmp.sINHVQQZIi +++ rm /tmp/tmp.8WodWHW9qE /tmp/tmp.sINHVQQZIi +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c5dXRb5Bm4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UfbITJ0nJs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.c5dXRb5Bm4 ++ cat /tmp/tmp.UfbITJ0nJs ++ rm /tmp/tmp.c5dXRb5Bm4 /tmp/tmp.UfbITJ0nJs ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace 
----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mpJxaMjlUi +++ mktemp ++ local LAST_ERR=/tmp/tmp.ea5YSPPVMz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mpJxaMjlUi ++ cat /tmp/tmp.ea5YSPPVMz ++ rm /tmp/tmp.mpJxaMjlUi /tmp/tmp.ea5YSPPVMz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J1eoWco5Ja +++ mktemp ++ local LAST_ERR=/tmp/tmp.3oQvTQArXJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.J1eoWco5Ja ++ cat /tmp/tmp.3oQvTQArXJ ++ rm /tmp/tmp.J1eoWco5Ja /tmp/tmp.3oQvTQArXJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N6j1es28HN +++ mktemp ++ local LAST_ERR=/tmp/tmp.OJ4T3mpRcw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.N6j1es28HN ++ cat /tmp/tmp.OJ4T3mpRcw ++ rm /tmp/tmp.N6j1es28HN /tmp/tmp.OJ4T3mpRcw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3FklXs5db1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ENM57evH6n ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3FklXs5db1 ++ cat /tmp/tmp.ENM57evH6n ++ rm /tmp/tmp.3FklXs5db1 /tmp/tmp.ENM57evH6n ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r32MQ5P4ZD +++ mktemp ++ local LAST_ERR=/tmp/tmp.NYUyI1hDyz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.r32MQ5P4ZD ++ cat /tmp/tmp.NYUyI1hDyz ++ rm /tmp/tmp.r32MQ5P4ZD /tmp/tmp.NYUyI1hDyz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Lq5TA0c3e +++ mktemp ++ local LAST_ERR=/tmp/tmp.GEIpoHA3iG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5Lq5TA0c3e ++ cat /tmp/tmp.GEIpoHA3iG ++ rm /tmp/tmp.5Lq5TA0c3e /tmp/tmp.GEIpoHA3iG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G15cqPXEgg +++ mktemp ++ local LAST_ERR=/tmp/tmp.KFRdJIETPq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.G15cqPXEgg ++ cat /tmp/tmp.KFRdJIETPq ++ rm /tmp/tmp.G15cqPXEgg /tmp/tmp.KFRdJIETPq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vXzzRuq5R2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NJbcTDJpBq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vXzzRuq5R2 ++ cat /tmp/tmp.NJbcTDJpBq ++ rm /tmp/tmp.vXzzRuq5R2 /tmp/tmp.NJbcTDJpBq ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vYngse7gl4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WeKwZdSHBH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vYngse7gl4 ++ cat /tmp/tmp.WeKwZdSHBH ++ rm /tmp/tmp.vYngse7gl4 /tmp/tmp.WeKwZdSHBH ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine auto-tuning +++ local cluster_name=auto-tuning ++++ get_proxy auto-tuning ++++ local target_cluster=auto-tuning +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.KVvLAiSZ3o ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.GYYhzPBlhC +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.KVvLAiSZ3o +++++ cat /tmp/tmp.GYYhzPBlhC +++++ rm /tmp/tmp.KVvLAiSZ3o /tmp/tmp.GYYhzPBlhC +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LJSSH1vgBj ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.MZA8jHUoCs +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.LJSSH1vgBj +++++ cat /tmp/tmp.MZA8jHUoCs +++++ rm /tmp/tmp.LJSSH1vgBj /tmp/tmp.MZA8jHUoCs +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo auto-tuning-proxysql ++++ return +++ local cluster_proxy=auto-tuning-proxysql +++ echo proxysql ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y8MA9LwDnY +++ mktemp ++ local LAST_ERR=/tmp/tmp.iLoU08HfwU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 
'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.y8MA9LwDnY ++ cat /tmp/tmp.iLoU08HfwU ++ rm /tmp/tmp.y8MA9LwDnY /tmp/tmp.iLoU08HfwU ++ return 0 + [[ 3 == \3 ]] ++ run_mysql 'SELECT @@innodb_buffer_pool_size;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@innodb_buffer_pool_size;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aQmNKbthla ++++ mktemp +++ local LAST_ERR=/tmp/tmp.E51vFqepvS +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aQmNKbthla +++ cat /tmp/tmp.E51vFqepvS +++ rm /tmp/tmp.aQmNKbthla /tmp/tmp.E51vFqepvS +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + INNODB_SIZE=805306368 ++ run_mysql 'SELECT @@max_connections;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@max_connections;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jPLCq3DYp2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bcLS2SUSdh +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.jPLCq3DYp2 +++ cat /tmp/tmp.bcLS2SUSdh +++ rm /tmp/tmp.jPLCq3DYp2 /tmp/tmp.bcLS2SUSdh +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ egrep '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + CONNECTIONS=200 + [[ 805306368 != 805306368 ]] + [[ 200 != 200 ]] + desc 'with-template: apply config and wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- with-template: apply config and wait cluster consistency ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: 
perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + local LAST_OUT=/tmp/tmp.8y8mSmafEy ++ mktemp + local LAST_ERR=/tmp/tmp.8bLJKhK8Lx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8y8mSmafEy perconaxtradbcluster.pxc.percona.com/auto-tuning configured + cat /tmp/tmp.8bLJKhK8Lx + rm /tmp/tmp.8y8mSmafEy /tmp/tmp.8bLJKhK8Lx + return 0 + wait_cluster_consistency auto-tuning 3 + local cluster_name=auto-tuning + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size auto-tuning ++ local cluster=auto-tuning +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VSL0FeoTfQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.APGLeVmufC +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.VSL0FeoTfQ +++ cat /tmp/tmp.APGLeVmufC +++ rm /tmp/tmp.VSL0FeoTfQ /tmp/tmp.APGLeVmufC +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HXRlhf7kBi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ijek5mh6bz +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.HXRlhf7kBi +++ cat /tmp/tmp.Ijek5mh6bz +++ rm /tmp/tmp.HXRlhf7kBi /tmp/tmp.Ijek5mh6bz +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QjWMMDc2CL +++ mktemp ++ local LAST_ERR=/tmp/tmp.xAXGkTlZ2N ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QjWMMDc2CL ++ cat /tmp/tmp.xAXGkTlZ2N ++ rm /tmp/tmp.QjWMMDc2CL /tmp/tmp.xAXGkTlZ2N ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7wIPaN0VZY +++ mktemp ++ local LAST_ERR=/tmp/tmp.YjMR7jyYG5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc 
auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7wIPaN0VZY ++ cat /tmp/tmp.YjMR7jyYG5 ++ rm /tmp/tmp.7wIPaN0VZY /tmp/tmp.YjMR7jyYG5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8WtrvNfpDI +++ mktemp ++ local LAST_ERR=/tmp/tmp.WDt1SqAzGO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8WtrvNfpDI ++ cat /tmp/tmp.WDt1SqAzGO ++ rm /tmp/tmp.8WtrvNfpDI /tmp/tmp.WDt1SqAzGO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6pKXWnwBDI +++ mktemp ++ local LAST_ERR=/tmp/tmp.anPO2hOM2r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6pKXWnwBDI ++ cat /tmp/tmp.anPO2hOM2r ++ rm /tmp/tmp.6pKXWnwBDI /tmp/tmp.anPO2hOM2r ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ROurKGm0NQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.DXJT1dqTWm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ROurKGm0NQ ++ cat /tmp/tmp.DXJT1dqTWm ++ rm /tmp/tmp.ROurKGm0NQ /tmp/tmp.DXJT1dqTWm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WVHXnsxZjB +++ mktemp ++ local LAST_ERR=/tmp/tmp.aVNaELgCXz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WVHXnsxZjB ++ cat /tmp/tmp.aVNaELgCXz ++ rm /tmp/tmp.WVHXnsxZjB /tmp/tmp.aVNaELgCXz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZM8QJjFD6W +++ mktemp ++ local LAST_ERR=/tmp/tmp.FpyVEVgCY1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZM8QJjFD6W ++ cat /tmp/tmp.FpyVEVgCY1 ++ rm /tmp/tmp.ZM8QJjFD6W /tmp/tmp.FpyVEVgCY1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JAjxZ90qkA +++ mktemp ++ local LAST_ERR=/tmp/tmp.yieL21Cn0B ++ local 
exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JAjxZ90qkA ++ cat /tmp/tmp.yieL21Cn0B ++ rm /tmp/tmp.JAjxZ90qkA /tmp/tmp.yieL21Cn0B ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nPq9kqIzBn +++ mktemp ++ local LAST_ERR=/tmp/tmp.PZhrZRDVYD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nPq9kqIzBn ++ cat /tmp/tmp.PZhrZRDVYD ++ rm /tmp/tmp.nPq9kqIzBn /tmp/tmp.PZhrZRDVYD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SRsAtTTPDj +++ mktemp ++ local LAST_ERR=/tmp/tmp.J5M81NaQ99 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SRsAtTTPDj ++ cat /tmp/tmp.J5M81NaQ99 ++ rm /tmp/tmp.SRsAtTTPDj /tmp/tmp.J5M81NaQ99 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.geN4BD26Il +++ mktemp ++ local LAST_ERR=/tmp/tmp.7xc9Qk0kKC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.geN4BD26Il ++ cat /tmp/tmp.7xc9Qk0kKC ++ rm /tmp/tmp.geN4BD26Il /tmp/tmp.7xc9Qk0kKC ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine auto-tuning +++ local cluster_name=auto-tuning ++++ get_proxy auto-tuning ++++ local target_cluster=auto-tuning +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.34DUq1h0Dt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.MDIw4JijTr +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.34DUq1h0Dt +++++ cat /tmp/tmp.MDIw4JijTr +++++ rm /tmp/tmp.34DUq1h0Dt /tmp/tmp.MDIw4JijTr +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.haLinODZil ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.sugtgSzPTK +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.haLinODZil +++++ cat /tmp/tmp.sugtgSzPTK +++++ rm /tmp/tmp.haLinODZil /tmp/tmp.sugtgSzPTK +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo auto-tuning-proxysql ++++ return +++ local cluster_proxy=auto-tuning-proxysql +++ echo proxysql ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.281ICQx7JS +++ mktemp ++ local LAST_ERR=/tmp/tmp.LmFVLgyMoS ++ local 
exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.281ICQx7JS ++ cat /tmp/tmp.LmFVLgyMoS ++ rm /tmp/tmp.281ICQx7JS /tmp/tmp.LmFVLgyMoS ++ return 0 + [[ 3 == \3 ]] ++ run_mysql 'SELECT @@innodb_buffer_pool_size;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@innodb_buffer_pool_size;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xhktzOPyFZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mVedx2iSjh +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.xhktzOPyFZ +++ cat /tmp/tmp.mVedx2iSjh +++ rm /tmp/tmp.xhktzOPyFZ /tmp/tmp.mVedx2iSjh +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ egrep '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + INNODB_SIZE=2147483648 ++ run_mysql 'SELECT @@max_connections;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@max_connections;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4TLiWz8OpA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fpPOY2GVOW +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.4TLiWz8OpA +++ cat /tmp/tmp.fpPOY2GVOW +++ rm /tmp/tmp.4TLiWz8OpA /tmp/tmp.fpPOY2GVOW +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + CONNECTIONS=200 + [[ 2147483648 != 2147483648 ]] + [[ 200 != 200 ]] + desc 'with-template-transform: apply config and wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- with-template-transform: apply config and wait cluster consistency ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template-transform.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template-transform.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1709/e2e-tests/auto-tuning/conf/auto-tuning-with-template-transform.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' 
++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1709-788cbf69#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_OUT=/tmp/tmp.KDNNyL9TeT ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.auto-tuning-20749~ + local LAST_ERR=/tmp/tmp.FziJEx5c9Y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.KDNNyL9TeT perconaxtradbcluster.pxc.percona.com/auto-tuning configured + cat /tmp/tmp.FziJEx5c9Y + rm /tmp/tmp.KDNNyL9TeT /tmp/tmp.FziJEx5c9Y + return 0 + wait_cluster_consistency auto-tuning 3 + local cluster_name=auto-tuning + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size auto-tuning ++ local cluster=auto-tuning +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Kv3vTE0g4M ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oUa6V4Ldad +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Kv3vTE0g4M +++ cat /tmp/tmp.oUa6V4Ldad +++ rm /tmp/tmp.Kv3vTE0g4M /tmp/tmp.oUa6V4Ldad +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sw0239IuGM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GiY4YBafGP +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.sw0239IuGM +++ cat /tmp/tmp.GiY4YBafGP +++ rm /tmp/tmp.sw0239IuGM /tmp/tmp.GiY4YBafGP +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6EGIZXVG9b +++ mktemp ++ local LAST_ERR=/tmp/tmp.oOZyQvvgLh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6EGIZXVG9b ++ cat /tmp/tmp.oOZyQvvgLh ++ rm /tmp/tmp.6EGIZXVG9b /tmp/tmp.oOZyQvvgLh ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oLLnhvhcQa +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.Q58ssZHKbR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oLLnhvhcQa ++ cat /tmp/tmp.Q58ssZHKbR ++ rm /tmp/tmp.oLLnhvhcQa /tmp/tmp.Q58ssZHKbR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mveApKHPQi +++ mktemp ++ local LAST_ERR=/tmp/tmp.yxImA1PJc2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mveApKHPQi ++ cat /tmp/tmp.yxImA1PJc2 ++ rm /tmp/tmp.mveApKHPQi /tmp/tmp.yxImA1PJc2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zVTk03ccR5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tyiwwuRJgc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zVTk03ccR5 ++ cat /tmp/tmp.tyiwwuRJgc ++ rm /tmp/tmp.zVTk03ccR5 /tmp/tmp.tyiwwuRJgc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7zedoQ3OXm +++ mktemp ++ local LAST_ERR=/tmp/tmp.yxihuWrMne ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7zedoQ3OXm ++ cat /tmp/tmp.yxihuWrMne ++ rm /tmp/tmp.7zedoQ3OXm /tmp/tmp.yxihuWrMne ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S4y7HjiZPh +++ mktemp ++ local LAST_ERR=/tmp/tmp.zicBhyG2oP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.S4y7HjiZPh ++ cat /tmp/tmp.zicBhyG2oP ++ rm /tmp/tmp.S4y7HjiZPh /tmp/tmp.zicBhyG2oP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tF5JHrHZI7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.shaVadVnDq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tF5JHrHZI7 ++ cat /tmp/tmp.shaVadVnDq ++ rm /tmp/tmp.tF5JHrHZI7 /tmp/tmp.shaVadVnDq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 
'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZD83gw3WVg +++ mktemp ++ local LAST_ERR=/tmp/tmp.TAuEJ9ZTJ5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZD83gw3WVg ++ cat /tmp/tmp.TAuEJ9ZTJ5 ++ rm /tmp/tmp.ZD83gw3WVg /tmp/tmp.TAuEJ9ZTJ5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.amSQ5ZxwCL +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qknz1MjNdE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.amSQ5ZxwCL ++ cat /tmp/tmp.Qknz1MjNdE ++ rm /tmp/tmp.amSQ5ZxwCL /tmp/tmp.Qknz1MjNdE ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fgcLEowTmR +++ mktemp ++ local LAST_ERR=/tmp/tmp.0N0M8IPdTQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fgcLEowTmR ++ cat /tmp/tmp.0N0M8IPdTQ ++ rm /tmp/tmp.fgcLEowTmR /tmp/tmp.0N0M8IPdTQ ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine auto-tuning +++ local cluster_name=auto-tuning ++++ get_proxy auto-tuning ++++ local target_cluster=auto-tuning +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.QOc5uOtMFt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.KifdqBLTnt +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.QOc5uOtMFt +++++ cat /tmp/tmp.KifdqBLTnt +++++ rm /tmp/tmp.QOc5uOtMFt /tmp/tmp.KifdqBLTnt +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.YX0u3IjGZZ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.rMvHASv7mW +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc auto-tuning -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.YX0u3IjGZZ +++++ cat /tmp/tmp.rMvHASv7mW +++++ rm /tmp/tmp.YX0u3IjGZZ /tmp/tmp.rMvHASv7mW +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo auto-tuning-proxysql ++++ return +++ local cluster_proxy=auto-tuning-proxysql +++ echo proxysql ++ kubectl_bin get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XM8E9pmuYu +++ mktemp ++ local LAST_ERR=/tmp/tmp.aMhh0l4HY1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc auto-tuning -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XM8E9pmuYu ++ cat /tmp/tmp.aMhh0l4HY1 ++ rm /tmp/tmp.XM8E9pmuYu /tmp/tmp.aMhh0l4HY1 ++ return 0 + [[ 3 == \3 ]] ++ run_mysql 'SELECT @@innodb_buffer_pool_size;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@innodb_buffer_pool_size;' ++ local 
'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GlNFgDviqh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4sWj4u7cSG +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.GlNFgDviqh +++ cat /tmp/tmp.4sWj4u7cSG +++ rm /tmp/tmp.GlNFgDviqh /tmp/tmp.4sWj4u7cSG +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ egrep '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + INNODB_SIZE=805306368 ++ run_mysql 'SELECT @@max_connections;' '-h auto-tuning-pxc -uroot -proot_password' ++ local 'command=SELECT @@max_connections;' ++ local 'uri=-h auto-tuning-pxc -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VQbVRxESEz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VJQocdrtGd +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.VQbVRxESEz +++ cat /tmp/tmp.VJQocdrtGd +++ rm /tmp/tmp.VQbVRxESEz /tmp/tmp.VJQocdrtGd +++ return 0 ++ client_pod=pxc-client-6644d8898f-nsz65 ++ wait_pod pxc-client-6644d8898f-nsz65 ++ local pod=pxc-client-6644d8898f-nsz65 ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-nsz65 +++ egrep '^(pxc|proxysql)$' +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-nsz65 condition met pxc-client-6644d8898f-nsz65.Ok ++ set +o xtrace + CONNECTIONS=200 + [[ 805306368 != 805306368 ]] + [[ 200 != 200 ]] + destroy auto-tuning-20749 + local namespace=auto-tuning-20749 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v level=info ++ get_operator_pod + grep -v 'get backup status: Job.batch' + grep -v 'the object has been modified' ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator + sort -u + tee /tmp/tmp.xsZGDOMGgG/operator.log +++ grep -c percona-xtradb-cluster-operator + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.RJVY9njA0v +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZSmZtyvqb1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n 
pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RJVY9njA0v ++ cat /tmp/tmp.ZSmZtyvqb1 ++ rm /tmp/tmp.RJVY9njA0v /tmp/tmp.ZSmZtyvqb1 ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5f94b588fb-d2qln ++ mktemp + local LAST_OUT=/tmp/tmp.X5O1VLy7XN ++ mktemp + local LAST_ERR=/tmp/tmp.Fd6LhRGSOf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5f94b588fb-d2qln + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.X5O1VLy7XN + cat /tmp/tmp.Fd6LhRGSOf + rm /tmp/tmp.X5O1VLy7XN /tmp/tmp.Fd6LhRGSOf + return 0 2024-05-14T17:03:43.780Z INFO setup Manager starting up {"gitCommit": "788cbf696c092963711ae0e20fe3f2e71ef58ae6", "gitBranch": "PR-1709-788cbf69", "buildTime": "2024-05-14T16:51:12Z", "goVersion": "go1.22.3", "os": "linux", "arch": "amd64"} 2024-05-14T17:03:43.780Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.26.15-gke.1300000"} 2024-05-14T17:03:43.781Z INFO setup Registering Components. 2024-05-14T17:03:48.363Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2024-05-14T17:03:48.367Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2024-05-14T17:03:48.367Z INFO controller-runtime.metrics Starting metrics server 2024-05-14T17:03:48.367Z INFO controller-runtime.webhook Starting webhook server 2024-05-14T17:03:48.367Z INFO setup Starting the Cmd. 2024-05-14T17:03:48.367Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2024-05-14T17:03:48.368Z INFO controller-runtime.certwatcher Starting certificate watcher 2024-05-14T17:03:48.368Z INFO controller-runtime.certwatcher Updated current TLS certificate 2024-05-14T17:03:48.368Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2024-05-14T17:03:48.468Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 
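The destroy step above captures the operator log for the test artifacts: it resolves the operator pod by label, dumps its log, drops noisy records, strips the volatile "ts" field, and keeps a sorted, de-duplicated copy. The following is reassembled from the traced pipe elements (set -x does not preserve pipe order, and the real tee target is a mktemp directory, so a fixed path stands in here):

# resolve the operator pod by its app.kubernetes.io/name label
operator_pod=$(kubectl get pods \
    --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
    -o 'jsonpath={.items[].metadata.name}' -n pxc-operator)

kubectl logs -n pxc-operator "$operator_pod" \
    | grep -v level=info \
    | grep -v 'get backup status: Job.batch' \
    | grep -v 'the object has been modified' \
    | /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
    | sort -u \
    | tee /tmp/operator.log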
2024-05-14T17:03:48.482Z DEBUG events percona-xtradb-cluster-operator-5f94b588fb-d2qln_b4b68f50-7e97-4b13-ab32-8554394013b8 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"e7fa96f0-997c-4c8c-8566-20c3216e2576","apiVersion":"coordination.k8s.io/v1","resourceVersion":"3400"}, "reason": "LeaderElection"} 2024-05-14T17:03:48.482Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2024-05-14T17:03:48.482Z INFO Starting Controller {"controller": "pxc-controller"} 2024-05-14T17:03:48.482Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2024-05-14T17:03:48.482Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2024-05-14T17:03:48.482Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2024-05-14T17:03:48.482Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2024-05-14T17:03:48.482Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2024-05-14T17:03:48.698Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2024-05-14T17:03:48.698Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2024-05-14T17:03:48.698Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2024-05-14T17:04:17.244Z INFO Set CR version {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "23166329-3859-49be-a925-fad948205d1c", "version": "1.15.0"} 2024-05-14T17:04:17.733Z INFO KubeAPIWarningLogger spec.template.spec.containers[0].resources.limits[memory]: fractional byte value "214748364800m" is invalid, must be an integer 2024-05-14T17:05:31.099Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e", "user": "operator"} 2024-05-14T17:05:31.131Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e", "user": "monitor"} 2024-05-14T17:05:31.195Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e"} 2024-05-14T17:05:31.227Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e"} 2024-05-14T17:05:31.272Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e", "user": "xtrabackup"} 2024-05-14T17:05:31.319Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e"} 2024-05-14T17:05:31.353Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e", "user": "replication"} 2024-05-14T17:05:31.479Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": 
"auto-tuning", "reconcileID": "1cabe020-359d-4fc0-bf9e-f711b239a17e", "err": "get primary pxc pod: not found"} 2024-05-14T17:05:35.888Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "b14231d6-f0d5-4c49-be39-7ea24313d3d2", "err": "get primary pxc pod: not found"} 2024-05-14T17:05:41.078Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "723c4927-6652-4c0f-aff4-99fa79bd74a0", "err": "get primary pxc pod: not found"} 2024-05-14T17:05:46.305Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "114a2f27-3dad-4d33-8b06-0bd695dddba8", "err": "get primary pxc pod: not found"} 2024-05-14T17:05:51.511Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "f16778f7-bf40-4add-a9be-2fa70f48371f", "err": "get primary pxc pod: not found"} 2024-05-14T17:07:59.594Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "44731bb4-9590-4127-8e2e-934012ef766c", "user": "root"} 2024-05-14T17:07:59.862Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "44731bb4-9590-4127-8e2e-934012ef766c", "new version": "8.0.36-28.1"} 2024-05-14T17:08:18.373Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "44731bb4-9590-4127-8e2e-934012ef766c"} 2024-05-14T17:08:39.172Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "a5530d99-d9c7-4e16-b75f-cf2565050095"} 2024-05-14T17:09:02.065Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "0b24bfad-34d2-4fda-887a-b86f9da157e7"} 2024-05-14T17:09:23.572Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "4a71caf5-721f-443c-84e0-6bac06597b63"} 2024-05-14T17:09:37.913Z ERROR sync users {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "897efdd7-8560-4007-8a29-1b8db8d810fb", "error": "exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / ", "errorVerbose": "exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:920\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1246\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} 2024-05-14T17:10:40.231Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "90f51e5a-1aa4-4b24-bb14-c78676d9f4cf", "err": "failed to ensure cluster readonly status: 
connect to pod auto-tuning-pxc-1: dial tcp: lookup auto-tuning-pxc-1.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:11:33.017Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "05efd4b3-946f-4294-b625-e4832067b31b", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp: lookup auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:11:38.219Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "7beeb914-44bb-4822-8c4f-de07f2ea4565", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:11:43.563Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "f3f2f341-33ae-4d3e-9ea6-f58c3fbdd117", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:11:48.762Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "6456e936-322c-4008-a027-17615aa7234a", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:11:59.180Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "c077e9d1-a39f-405d-a5d8-4e02037b9926", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:12:13.528Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "741de0db-fd31-46c8-a699-001129132105"} 2024-05-14T17:12:18.619Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "95c42955-c1d2-4c46-a144-3a677ad59d31"} 2024-05-14T17:12:22.959Z ERROR sync users {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "c9502545-6da6-4681-a07b-4753caba5ea2", "error": "exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / ", "errorVerbose": "exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:920\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1246\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} 2024-05-14T17:13:16.458Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "0cbaef90-3562-4000-ab7e-a7df213d430c", "err": "failed to ensure cluster readonly status: connect to pod auto-tuning-pxc-1: dial tcp: lookup auto-tuning-pxc-1.auto-tuning-pxc.auto-tuning-20749 on 
10.130.176.10:53: no such host"} 2024-05-14T17:13:21.711Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "691ae82d-8233-4b92-ae7e-c49aa0828583", "err": "failed to ensure cluster readonly status: connect to pod auto-tuning-pxc-1: dial tcp 10.74.130.6:33062: connect: connection refused"} 2024-05-14T17:14:09.764Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "9bcbcf03-fc5b-4cca-87e6-b8c647e8ec8b", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp 10.74.129.8:33062: connect: connection refused"} 2024-05-14T17:14:15.082Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1b5e3d41-fdfe-43ca-9cf9-d49ce2c24e1e", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:20.394Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "83e677a1-0541-4b86-ad4b-600af84493bc", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:25.568Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "dd979338-db04-4bef-8a32-69b2b0b9c7ae", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:30.752Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "7959e76b-d452-4694-a534-f5000dc0031c", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:35.934Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "a6249ada-ecc3-4283-8d39-49682315d062", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:41.150Z INFO Unable to find primary pod for replication. 
No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "b1fef168-c379-4f65-9570-4a63115860df", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"} 2024-05-14T17:14:50.313Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "ad67b9bf-a060-4462-abab-9f6890d8cbc5"} 2024-05-14T17:14:55.243Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "762f8795-6905-454c-b10f-6006a76400a3"} 2024-05-14T17:15:00.934Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "7c72097d-0182-4152-9616-e1483159f02a"} 2024-05-14T17:15:06.139Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "f793e872-edf6-440b-a369-c35e4bb5e325"} 2024-05-14T17:15:11.762Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "7e6fe72b-0987-4e89-97a3-589d3d7d31e7"} 2024-05-14T17:15:17.048Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "6150d52c-876d-4701-aa2d-1877a07dc2bb"} 2024-05-14T17:15:21.666Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "5997ecc6-1a7d-4729-91ce-545fa260279e", "err": "failed to ensure cluster readonly status: connect to pod auto-tuning-pxc-2: dial tcp: lookup auto-tuning-pxc-2.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:15:22.736Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1e7abdab-2e52-4931-ba04-788a85ac0c3a"} 2024-05-14T17:16:37.727Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "cd76459f-492a-423b-832b-ec4b187a27d1", "err": "failed to ensure cluster readonly status: connect to pod auto-tuning-pxc-1: dial tcp: lookup auto-tuning-pxc-1.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:17:15.045Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "9ddd3ce6-9bf5-47fc-851b-0cf2717a4b26", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp: lookup auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:17:15.311Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "ce2351b8-844e-40c2-a4ce-9a6059a2864e", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp: lookup auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"} 2024-05-14T17:17:20.373Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "98805e7f-5663-4fec-8168-2dc6a48f968a", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp: lookup auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749 on 
2024-05-14T17:17:25.552Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "d55660e6-56f1-4cf7-ae5a-932dabfd9ba6", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:30.753Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "f0cce3f0-079f-47cb-bc28-ec29c1e3fa7f", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:35.972Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "39cd3cd4-407a-456e-8c83-04b6e2467ddd", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:41.186Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "8e4d16dc-a9ce-42a7-aeff-dcb8d6024f6e", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:46.361Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "89a91f69-cd48-4c0e-9801-2afde78fcac0", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:51.571Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "c38feaac-cdc7-4755-8bd2-03c0685d47db", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:17:56.775Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "0cfb93dd-bd36-4a28-8d19-cf377097ba34", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:18:06.030Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "3e153600-bb53-4cad-8fac-b3f5e2103cd3"}
2024-05-14T17:18:11.132Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "2192189d-b0b8-4e47-98e1-d8c37faf0c87"}
2024-05-14T17:18:16.503Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "4d9118a4-e420-43fe-83e0-4781ed96ba94"}
2024-05-14T17:18:22.079Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "8bc58926-81fa-4b2a-aadc-e4b098cd5709"}
2024-05-14T17:18:27.636Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "15a30a3b-86e8-442b-8875-68797e0e6a41"}
2024-05-14T17:18:33.020Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "0ceac978-c698-4e84-b432-1c9c503784db"}
2024-05-14T17:18:38.508Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "5a36cd0b-4eb9-4d77-94ff-25f3c48eec54"}
2024-05-14T17:18:42.893Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "2ea44252-c662-4551-b237-403d6d2573bc"}
2024-05-14T17:19:39.245Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "11c1f83a-2f31-4f10-83da-31ce3e155674", "err": "failed to ensure cluster readonly status: connect to pod auto-tuning-pxc-1: dial tcp 10.74.130.9:33062: connect: connection refused"}
2024-05-14T17:20:32.502Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "56eeed71-2716-4ef3-b50a-b3d7958433b3", "err": "failed to connect to pod auto-tuning-pxc-0: dial tcp: lookup auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749 on 10.130.176.10:53: no such host"}
2024-05-14T17:20:42.967Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "0aede681-88fa-43a1-967d-916a273eb0f7", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:20:48.501Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "1b4918c0-3f7c-46ad-81bc-553a06326c3c", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:21:04.206Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "4c6ba508-51d2-4396-94ff-6cf0b5fc699b", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
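"Unable to find primary pod for replication" is logged when the operator cannot map the primary it last recorded (auto-tuning-pxc-0...) to a live pod. Since this cluster fronts PXC with ProxySQL, one way to see which member ProxySQL currently treats as the writer is to query its admin interface. This is a hypothetical inspection command, not part of the test; the pod and container names follow the operator's usual <cluster>-proxysql-0 naming, and the literal admin:admin credentials are only a placeholder for values taken from the cluster's secret:

# List the runtime server table so writer-hostgroup membership is visible.
kubectl -n auto-tuning-20749 exec auto-tuning-proxysql-0 -c proxysql -- \
  mysql -h127.0.0.1 -P6032 -uadmin -padmin \
  -e 'SELECT hostgroup_id, hostname, status FROM runtime_mysql_servers;'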
2024-05-14T17:21:09.379Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "e4af99a6-c966-44bd-9c99-60a6a0eb76c5", "primary name": "auto-tuning-pxc-0.auto-tuning-pxc.auto-tuning-20749.svc.cluster.local"}
2024-05-14T17:21:18.785Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "f3a61845-2fff-414d-9f97-04b0b20180a4"}
2024-05-14T17:21:23.830Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "dd9b11fa-0681-4b2f-8c83-e1f01dc25a56"}
2024-05-14T17:21:29.357Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "auto-tuning-20749", "name": "auto-tuning", "reconcileID": "7fd3b510-dd58-413b-9906-fe4e5cef3f65"}
github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1
	/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1248
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler
	/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.2/pkg/internal/controller/controller.go:324
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem
	/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.2/pkg/internal/controller/controller.go:261
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2
	/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.2/pkg/internal/controller/controller.go:222
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get pxc --all-namespaces -o wide
+ kubectl patch pxc -n auto-tuning-20749 auto-tuning --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/auto-tuning patched (no change)
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.TmKoHIcSLw
++ mktemp
+ local LAST_ERR=/tmp/tmp.C7o0t8fKo9
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.TmKoHIcSLw
perconaxtradbcluster.pxc.percona.com "auto-tuning" deleted
+ cat /tmp/tmp.C7o0t8fKo9
+ rm /tmp/tmp.TmKoHIcSLw /tmp/tmp.C7o0t8fKo9
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.zINZxGA24S
++ mktemp
+ local LAST_ERR=/tmp/tmp.UsFjngWcuK
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.zINZxGA24S
No resources found
+ cat /tmp/tmp.UsFjngWcuK
+ rm /tmp/tmp.zINZxGA24S /tmp/tmp.UsFjngWcuK
+ return 0
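The mktemp/seq 0 2 pattern that repeats through this teardown is the harness's kubectl_bin retry wrapper: capture stdout and stderr to temp files, try the command up to three times, print the captured output, and return the exit status. The trace shows its behavior but not its source; the following is a hedged reconstruction inferred only from the lines above, not the harness's actual code:

# Reconstruction from the trace, not the real kubectl_bin source.
kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" != 0 ]; then
			continue   # assumption: retry on failure (the trace only shows first-try successes)
		fi
		break
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR" >&2
	rm "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}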
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.tkArTuiuqW
++ mktemp
+ local LAST_ERR=/tmp/tmp.NRjGoNE32n
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.tkArTuiuqW
No resources found
+ cat /tmp/tmp.NRjGoNE32n
+ rm /tmp/tmp.tkArTuiuqW /tmp/tmp.NRjGoNE32n
+ return 0
+ kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
++ mktemp
+ local LAST_OUT=/tmp/tmp.ojuz44c6uq
++ mktemp
+ local LAST_ERR=/tmp/tmp.S4EolMo6u2
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ojuz44c6uq
validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted
+ cat /tmp/tmp.S4EolMo6u2
+ rm /tmp/tmp.ojuz44c6uq /tmp/tmp.S4EolMo6u2
+ return 0
+ kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml
+ :
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace auto-tuning-20749
+ rm -rf /tmp/tmp.xsZGDOMGgG
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.sDbKGadKPA
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.QxB1AXCYRb
+ local LAST_ERR=/tmp/tmp.pkGhAJkkjm
+ local exit_status=0
++ seq 0 2
++ mktemp
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace auto-tuning-20749
+ local LAST_ERR=/tmp/tmp.E0U2v28cUr
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pxc-operator
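Teardown ends with two concurrent force-deletes (--grace-period=0 --force=true) of the test and operator namespaces, whose traces interleave above; the commands return before the namespaces are actually gone. A follow-up wait like the one below, which is not part of this harness, would confirm the deletions completed before the cluster is reused:

# Hypothetical follow-up: block until both namespaces are gone.
for ns in auto-tuning-20749 pxc-operator; do
	kubectl wait --for=delete namespace/"$ns" --timeout=120s || true
done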