Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/logs/users-8-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra users-12846 + local ns=users-12846 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n users-9824 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/some-name patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.micqE1nPhJ ++ mktemp + local LAST_ERR=/tmp/tmp.UWLLdOVLXG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.micqE1nPhJ perconaxtradbcluster.pxc.percona.com "some-name" deleted from users-9824 namespace + cat /tmp/tmp.UWLLdOVLXG + rm /tmp/tmp.micqE1nPhJ /tmp/tmp.UWLLdOVLXG + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.TbOXWx5TCU ++ mktemp + local LAST_ERR=/tmp/tmp.yu5ZirTvqX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TbOXWx5TCU No resources found + cat /tmp/tmp.yu5ZirTvqX + rm /tmp/tmp.TbOXWx5TCU /tmp/tmp.yu5ZirTvqX + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.qikhfRmuEE ++ mktemp + local LAST_ERR=/tmp/tmp.MzQRA5UKUy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qikhfRmuEE No resources found + cat /tmp/tmp.MzQRA5UKUy + rm /tmp/tmp.qikhfRmuEE /tmp/tmp.MzQRA5UKUy + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, 
but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.esP4B6t292 ++ mktemp + local LAST_OUT=/tmp/tmp.Pb20kEdqd7 ++ mktemp + local LAST_ERR=/tmp/tmp.BXyPPYbjWr + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.PBXM75r0J9 + local exit_status=0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Pb20kEdqd7 + cat /tmp/tmp.PBXM75r0J9 + rm /tmp/tmp.Pb20kEdqd7 /tmp/tmp.PBXM75r0J9 + return 0 namespace "users-9824" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.esP4B6t292 namespace "pxc-operator" deleted + cat /tmp/tmp.BXyPPYbjWr + rm /tmp/tmp.esP4B6t292 /tmp/tmp.BXyPPYbjWr + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.DpVwOsuX3A ++ mktemp + local LAST_ERR=/tmp/tmp.WoHHjE3Msh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DpVwOsuX3A namespace/pxc-operator created + cat /tmp/tmp.WoHHjE3Msh + rm /tmp/tmp.DpVwOsuX3A /tmp/tmp.WoHHjE3Msh + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6CujbvKqZM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ef8t0OWS1K ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6CujbvKqZM ++ cat /tmp/tmp.Ef8t0OWS1K ++ rm /tmp/tmp.6CujbvKqZM /tmp/tmp.Ef8t0OWS1K ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EBRZjUNZn2 ++ mktemp + local LAST_ERR=/tmp/tmp.Quq2d9yas1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EBRZjUNZn2 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5" modified. 
+ cat /tmp/tmp.Quq2d9yas1 + rm /tmp/tmp.EBRZjUNZn2 /tmp/tmp.Quq2d9yas1 + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vPV6hegfaa ++ mktemp + local LAST_ERR=/tmp/tmp.kpp5HMplUA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vPV6hegfaa customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.kpp5HMplUA + rm /tmp/tmp.vPV6hegfaa /tmp/tmp.kpp5HMplUA + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rP0ikYFU0M ++ mktemp + local LAST_ERR=/tmp/tmp.lIt4oy6Ogr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rP0ikYFU0M clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.lIt4oy6Ogr + rm /tmp/tmp.rP0ikYFU0M /tmp/tmp.lIt4oy6Ogr + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - ++ mktemp + local LAST_OUT=/tmp/tmp.pW2NT81Bk6 ++ mktemp + local LAST_ERR=/tmp/tmp.YjZyQOG169 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pW2NT81Bk6 deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.YjZyQOG169 + rm /tmp/tmp.pW2NT81Bk6 /tmp/tmp.YjZyQOG169 + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + 
local LAST_OUT=/tmp/tmp.QpxqMMxVNP ++ mktemp + local LAST_ERR=/tmp/tmp.j4gKj83r8P + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QpxqMMxVNP pod/percona-xtradb-cluster-operator-944bd69c8-4lknz condition met + cat /tmp/tmp.j4gKj83r8P + rm /tmp/tmp.QpxqMMxVNP /tmp/tmp.j4gKj83r8P + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.KjcJqPFSkw +++ mktemp ++ local LAST_ERR=/tmp/tmp.s8yIXY3u4u ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KjcJqPFSkw ++ cat /tmp/tmp.s8yIXY3u4u ++ rm /tmp/tmp.KjcJqPFSkw /tmp/tmp.s8yIXY3u4u ++ return 0 + wait_pod percona-xtradb-cluster-operator-944bd69c8-4lknz 480 pxc-operator + local pod=percona-xtradb-cluster-operator-944bd69c8-4lknz + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-944bd69c8-4lknz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-944bd69c8-4lknz condition met waiting for pod/percona-xtradb-cluster-operator-944bd69c8-4lknz to become Ready.Ok + sleep 3 + create_namespace users-12846 + local namespace=users-12846 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was 
specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces users-12846' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces users-12846 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace users-12846 ++ mktemp + local LAST_OUT=/tmp/tmp.n9GcpRmg9Y + local LAST_OUT=/tmp/tmp.yjCBeBchYS + awk '{print$1}' ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.sxtEVZA0I5 + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.5nYlfpm3bV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace users-12846 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace users-12846 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.n9GcpRmg9Y + cat /tmp/tmp.sxtEVZA0I5 + rm /tmp/tmp.n9GcpRmg9Y /tmp/tmp.sxtEVZA0I5 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace users-12846 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.yjCBeBchYS + cat /tmp/tmp.5nYlfpm3bV Error from server (NotFound): namespaces "users-12846" not found + rm /tmp/tmp.yjCBeBchYS /tmp/tmp.5nYlfpm3bV + return 1 + : + wait_for_delete namespace/users-12846 + local res=namespace/users-12846 + echo -n 'waiting for namespace/users-12846 to be deleted' waiting for namespace/users-12846 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "users-12846" not found + desc 'create namespace users-12846' + set +o xtrace ----------------------------------------------------------------------------------- create namespace users-12846 ----------------------------------------------------------------------------------- + kubectl_bin create namespace users-12846 ++ mktemp + local LAST_OUT=/tmp/tmp.UKMasPlfEe ++ mktemp + local LAST_ERR=/tmp/tmp.opOPHSbtGO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace users-12846 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UKMasPlfEe namespace/users-12846 created + cat /tmp/tmp.opOPHSbtGO + rm /tmp/tmp.UKMasPlfEe /tmp/tmp.opOPHSbtGO + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hnwa75pesx +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yr7Xm4mtIc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Hnwa75pesx ++ cat /tmp/tmp.Yr7Xm4mtIc ++ rm /tmp/tmp.Hnwa75pesx /tmp/tmp.Yr7Xm4mtIc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=users-12846 ++ mktemp + local LAST_OUT=/tmp/tmp.pFOA0Gsu1i ++ mktemp + local LAST_ERR=/tmp/tmp.Xx6SLs9G2z + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=users-12846 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pFOA0Gsu1i Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5" modified. + cat /tmp/tmp.Xx6SLs9G2z + rm /tmp/tmp.pFOA0Gsu1i /tmp/tmp.Xx6SLs9G2z + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dtnGHPlHLY ++ mktemp + local LAST_ERR=/tmp/tmp.UgQKsrUCVK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dtnGHPlHLY secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.UgQKsrUCVK + rm /tmp/tmp.dtnGHPlHLY /tmp/tmp.UgQKsrUCVK + return 0 + desc 'create PXC cluster with 1-password secret' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster with 1-password secret ----------------------------------------------------------------------------------- + newpass=test-password ++ echo -n test-password ++ base64 + newpassencrypted=dGVzdC1wYXNzd29yZA== + cluster=some-name + spinup_pxc some-name /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml '' '' /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets_one_pass.yml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets_one_pass.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets_one_pass.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Pe2oIEioJe ++ mktemp + local LAST_ERR=/tmp/tmp.gLNCrkNOk6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets_one_pass.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Pe2oIEioJe secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.gLNCrkNOk6 + rm /tmp/tmp.Pe2oIEioJe /tmp/tmp.gLNCrkNOk6 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + local LAST_OUT=/tmp/tmp.OXQKpCcLfk + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.users-12846~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.slfdXSnpm4 + local exit_status=0 + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OXQKpCcLfk deployment.apps/pxc-client created + cat /tmp/tmp.slfdXSnpm4 + rm /tmp/tmp.OXQKpCcLfk /tmp/tmp.slfdXSnpm4 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/some-name.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.RlmMsAlTCt + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.users-12846~ + local LAST_ERR=/tmp/tmp.zFZ5XdQKvU + local exit_status=0 + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + for i in '$(seq 0 2)' + set +e 
+ kubectl apply -f - + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RlmMsAlTCt perconaxtradbcluster.pxc.percona.com/some-name created + cat /tmp/tmp.zFZ5XdQKvU + rm /tmp/tmp.RlmMsAlTCt /tmp/tmp.zFZ5XdQKvU + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy some-name ++ local target_cluster=some-name +++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sKmEGDT6nm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JR2MiFVnpd +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.sKmEGDT6nm +++ cat /tmp/tmp.JR2MiFVnpd +++ rm /tmp/tmp.sKmEGDT6nm /tmp/tmp.JR2MiFVnpd +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DkQtL9FyBi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qlt2QvrBwf +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.DkQtL9FyBi +++ cat /tmp/tmp.qlt2QvrBwf +++ rm /tmp/tmp.DkQtL9FyBi /tmp/tmp.qlt2QvrBwf +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo some-name-proxysql ++ return + local proxy=some-name-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n users-12846 ++ mktemp + local LAST_OUT=/tmp/tmp.UzBGnRcXFx ++ mktemp + local LAST_ERR=/tmp/tmp.uYOMa6kzeD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n users-12846 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n users-12846 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n users-12846 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.UzBGnRcXFx + cat /tmp/tmp.uYOMa6kzeD error: no matching resources found + rm /tmp/tmp.UzBGnRcXFx /tmp/tmp.uYOMa6kzeD + return 1 + true + wait_for_running some-name-proxysql 1 + local name=some-name-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod some-name-proxysql-0 480 + local pod=some-name-proxysql-0 + local max_retry=480 + local ns= ++ echo 
some-name-proxysql-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=proxysql + set +o xtrace pod/some-name-proxysql-0 condition met waiting for pod/some-name-proxysql-0 to become Ready.Ok + wait_for_running some-name-pxc 3 + local name=some-name-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod some-name-pxc-0 480 + local pod=some-name-pxc-0 + local max_retry=480 + local ns= ++ echo some-name-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/some-name-pxc-0 condition met waiting for pod/some-name-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod some-name-pxc-1 480 + local pod=some-name-pxc-1 + local max_retry=480 + local ns= ++ echo some-name-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/some-name-pxc-1 condition met waiting for pod/some-name-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod some-name-pxc-2 480 + local pod=some-name-pxc-2 + local max_retry=480 + local ns= ++ echo some-name-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/some-name-pxc-2 condition met waiting for pod/some-name-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc some-name -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.S1dF0wMPhA +++ mktemp ++ local LAST_ERR=/tmp/tmp.aI7nIpubo2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.S1dF0wMPhA ++ cat /tmp/tmp.aI7nIpubo2 ++ rm /tmp/tmp.S1dF0wMPhA /tmp/tmp.aI7nIpubo2 ++ return 0 + local 'root_pass=llSjeKg*,4_Ft)5Z%5' + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MMVf1ZGuSL +++ mktemp ++ local LAST_ERR=/tmp/tmp.yhNNSwwpGe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MMVf1ZGuSL ++ cat /tmp/tmp.yhNNSwwpGe ++ 
rm /tmp/tmp.MMVf1ZGuSL /tmp/tmp.yhNNSwwpGe ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wjm6wmbW92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EcbL2GSfpK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Wjm6wmbW92 ++ cat /tmp/tmp.EcbL2GSfpK ++ rm /tmp/tmp.Wjm6wmbW92 /tmp/tmp.EcbL2GSfpK ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h some-name-pxc-0.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-0.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h some-name-pxc-0.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-0.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vga38HAURk +++ mktemp ++ local LAST_ERR=/tmp/tmp.DqHECbpfHU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vga38HAURk ++ cat /tmp/tmp.DqHECbpfHU ++ rm /tmp/tmp.vga38HAURk /tmp/tmp.DqHECbpfHU ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + 
local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8WmlZy8yQB/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql /tmp/tmp.8WmlZy8yQB/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h some-name-pxc-1.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-1.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h some-name-pxc-1.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-1.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YTZJhiBrDf +++ mktemp ++ local LAST_ERR=/tmp/tmp.m7xZCss4V8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YTZJhiBrDf ++ cat /tmp/tmp.m7xZCss4V8 ++ rm /tmp/tmp.YTZJhiBrDf /tmp/tmp.m7xZCss4V8 ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.8WmlZy8yQB/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql /tmp/tmp.8WmlZy8yQB/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h some-name-pxc-2.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-2.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h some-name-pxc-2.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h some-name-pxc-2.some-name-pxc -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dd5vAjAfc7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IfF6MmfEdl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Dd5vAjAfc7 ++ cat /tmp/tmp.IfF6MmfEdl ++ rm /tmp/tmp.Dd5vAjAfc7 /tmp/tmp.IfF6MmfEdl ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8WmlZy8yQB/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-1.sql /tmp/tmp.8WmlZy8yQB/select-1.sql + is_keyring_plugin_in_use some-name + local cluster=some-name + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + kubectl exec -it some-name-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' + grep -E -o 'early-plugin-load=keyring_\w+.so' Unable to use a TTY - input is not a terminal or the right kind of file + return 1 ++ kubectl exec -it some-name-proxysql-0 -- sh -c 'proxysql --version 2>/dev/null' ++ awk '{print $3}' ++ cut -d. 
-f1 Unable to use a TTY - input is not a terminal or the right kind of file + PROXYSQL_VER=2 + tables_cmp_file=select-2-proxysql3 + [[ 2 == 2 ]] + tables_cmp_file=select-2 + desc 'test missing passwords were created and present in internal secrets' + set +o xtrace ----------------------------------------------------------------------------------- test missing passwords were created and present in internal secrets ----------------------------------------------------------------------------------- + empty_pwds=() + wrong_pwds=() + for user in root xtrabackup monitor proxyadmin operator replication + echo 'Checking root' Checking root ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.CLkSlHgoH4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hep6kfXZ1e ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CLkSlHgoH4 ++ cat /tmp/tmp.Hep6kfXZ1e ++ rm /tmp/tmp.CLkSlHgoH4 /tmp/tmp.Hep6kfXZ1e ++ return 0 + secret_pass='llSjeKg*,4_Ft)5Z%5' ++ getSecretData internal-some-name root ++ local secretName=internal-some-name ++ local dataKey=root ++ kubectl_bin get secrets/internal-some-name '--template={{.data.root}}' +++ mktemp ++ base64 --decode ++ local LAST_OUT=/tmp/tmp.Q7GjPwYYt6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2v83X6DSCb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/internal-some-name '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Q7GjPwYYt6 ++ cat /tmp/tmp.2v83X6DSCb ++ rm /tmp/tmp.Q7GjPwYYt6 /tmp/tmp.2v83X6DSCb ++ return 0 + int_secret_pass='llSjeKg*,4_Ft)5Z%5' + [[ -z llSjeKg*,4_Ft)5Z%5 ]] + [[ llSjeKg*,4_Ft)5Z%5 != \l\l\S\j\e\K\g\*\,\4\_\F\t\)\5\Z\%\5 ]] + [[ root != \p\r\o\x\y\a\d\m\i\n ]] + [[ '' =~ root ]] + [[ '' =~ root ]] + echo 'Running compare for root' Running compare for root + compare_mysql_cmd select-4 'SHOW TABLES;' '-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\''' + local command_id=select-4 + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\''' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql ]] + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql + run_mysql 'SHOW TABLES;' '-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\''' + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -uroot -p'\''llSjeKg*,4_Ft)5Z%5'\''' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zjIJ6O30im +++ mktemp ++ local LAST_ERR=/tmp/tmp.7oRIc3Tcuw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zjIJ6O30im ++ cat /tmp/tmp.7oRIc3Tcuw ++ rm /tmp/tmp.zjIJ6O30im 
/tmp/tmp.7oRIc3Tcuw ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8WmlZy8yQB/select-4.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-4.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql /tmp/tmp.8WmlZy8yQB/select-4.sql + for user in root xtrabackup monitor proxyadmin operator replication + echo 'Checking xtrabackup' Checking xtrabackup ++ getSecretData my-cluster-secrets xtrabackup ++ local secretName=my-cluster-secrets ++ local dataKey=xtrabackup ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.xtrabackup}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.pPuKc0Agdu +++ mktemp ++ local LAST_ERR=/tmp/tmp.OIIfUz6LQu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.xtrabackup}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pPuKc0Agdu ++ cat /tmp/tmp.OIIfUz6LQu ++ rm /tmp/tmp.pPuKc0Agdu /tmp/tmp.OIIfUz6LQu ++ return 0 + secret_pass='CC?*{fV}j05^N++C=ut' ++ getSecretData internal-some-name xtrabackup ++ local secretName=internal-some-name ++ local dataKey=xtrabackup ++ kubectl_bin get secrets/internal-some-name '--template={{.data.xtrabackup}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.0zHcUGxWeh +++ mktemp ++ local LAST_ERR=/tmp/tmp.PYj3875Kue ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/internal-some-name '--template={{.data.xtrabackup}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0zHcUGxWeh ++ cat /tmp/tmp.PYj3875Kue ++ rm /tmp/tmp.0zHcUGxWeh /tmp/tmp.PYj3875Kue ++ return 0 + int_secret_pass='CC?*{fV}j05^N++C=ut' + [[ -z CC?*{fV}j05^N++C=ut ]] + [[ CC?*{fV}j05^N++C=ut != \C\C\?\*\{\f\V\}\j\0\5\^\N\+\+\C\=\u\t ]] + [[ xtrabackup != \p\r\o\x\y\a\d\m\i\n ]] + [[ '' =~ xtrabackup ]] + [[ '' =~ xtrabackup ]] + echo 'Running compare for xtrabackup' Running compare for xtrabackup + compare_mysql_cmd select-4 'SHOW TABLES;' '-h some-name-proxysql -uxtrabackup -p'\''CC?*{fV}j05^N++C=ut'\''' + local command_id=select-4 + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -uxtrabackup -p'\''CC?*{fV}j05^N++C=ut'\''' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql ]] + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql + run_mysql 'SHOW TABLES;' '-h some-name-proxysql -uxtrabackup -p'\''CC?*{fV}j05^N++C=ut'\''' + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -uxtrabackup -p'\''CC?*{fV}j05^N++C=ut'\''' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ 
mktemp ++ local LAST_OUT=/tmp/tmp.64dvfr3I6R +++ mktemp ++ local LAST_ERR=/tmp/tmp.EDB5XD0Wv0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.64dvfr3I6R ++ cat /tmp/tmp.EDB5XD0Wv0 ++ rm /tmp/tmp.64dvfr3I6R /tmp/tmp.EDB5XD0Wv0 ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8WmlZy8yQB/select-4.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-4.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql /tmp/tmp.8WmlZy8yQB/select-4.sql + for user in root xtrabackup monitor proxyadmin operator replication + echo 'Checking monitor' Checking monitor ++ getSecretData my-cluster-secrets monitor ++ local secretName=my-cluster-secrets ++ local dataKey=monitor ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.monitor}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.2N0kb1MdAi +++ mktemp ++ local LAST_ERR=/tmp/tmp.zuGfX0xLeW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.monitor}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2N0kb1MdAi ++ cat /tmp/tmp.zuGfX0xLeW ++ rm /tmp/tmp.2N0kb1MdAi /tmp/tmp.zuGfX0xLeW ++ return 0 + secret_pass=monitor_password ++ getSecretData internal-some-name monitor ++ local secretName=internal-some-name ++ local dataKey=monitor ++ base64 --decode ++ kubectl_bin get secrets/internal-some-name '--template={{.data.monitor}}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.syV83rCRiR +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q2pVsNuK3e ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/internal-some-name '--template={{.data.monitor}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.syV83rCRiR ++ cat /tmp/tmp.Q2pVsNuK3e ++ rm /tmp/tmp.syV83rCRiR /tmp/tmp.Q2pVsNuK3e ++ return 0 + int_secret_pass=monitor_password + [[ -z monitor_password ]] + [[ monitor_password != \m\o\n\i\t\o\r\_\p\a\s\s\w\o\r\d ]] + [[ monitor != \p\r\o\x\y\a\d\m\i\n ]] + [[ '' =~ monitor ]] + [[ '' =~ monitor ]] + echo 'Running compare for monitor' Running compare for monitor + compare_mysql_cmd select-4 'SHOW TABLES;' '-h some-name-proxysql -umonitor -p'\''monitor_password'\''' + local command_id=select-4 + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -umonitor -p'\''monitor_password'\''' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql ]] + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql + 
run_mysql 'SHOW TABLES;' '-h some-name-proxysql -umonitor -p'\''monitor_password'\''' + local 'command=SHOW TABLES;' + local 'uri=-h some-name-proxysql -umonitor -p'\''monitor_password'\''' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S9fjFnYbUc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZC3eCENGd1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.S9fjFnYbUc ++ cat /tmp/tmp.ZC3eCENGd1 ++ rm /tmp/tmp.S9fjFnYbUc /tmp/tmp.ZC3eCENGd1 ++ return 0 + client_pod=pxc-client-67fc4995bb-flp9g + wait_pod pxc-client-67fc4995bb-flp9g + local pod=pxc-client-67fc4995bb-flp9g + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-flp9g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-flp9g condition met waiting for pod/pxc-client-67fc4995bb-flp9g to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8WmlZy8yQB/select-4.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8WmlZy8yQB/select-4.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql /tmp/tmp.8WmlZy8yQB/select-4.sql
--- /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/users/compare/select-4-80.sql 2026-03-11 07:25:23.074303504 +0000
+++ /tmp/tmp.8WmlZy8yQB/select-4.sql 2026-03-11 11:30:09.785290194 +0000
@@ -1,88 +1,2 @@
-ADMINISTRABLE_ROLE_AUTHORIZATIONS
-APPLICABLE_ROLES
-CHARACTER_SETS
-CHECK_CONSTRAINTS
-CLIENT_STATISTICS
-COLLATIONS
-COLLATION_CHARACTER_SET_APPLICABILITY
-COLUMNS
-COLUMNS_EXTENSIONS
-COLUMN_PRIVILEGES
-COLUMN_STATISTICS
-COMPRESSION_DICTIONARY
-COMPRESSION_DICTIONARY_TABLES
-ENABLED_ROLES
-ENGINES
-EVENTS
-FILES
-GLOBAL_TEMPORARY_TABLES
-INDEX_STATISTICS
-INNODB_BUFFER_PAGE
-INNODB_BUFFER_PAGE_LRU
-INNODB_BUFFER_POOL_STATS
-INNODB_CACHED_INDEXES
-INNODB_CMP
-INNODB_CMPMEM
-INNODB_CMPMEM_RESET
-INNODB_CMP_PER_INDEX
-INNODB_CMP_PER_INDEX_RESET
-INNODB_CMP_RESET
-INNODB_COLUMNS
-INNODB_DATAFILES
-INNODB_FIELDS
-INNODB_FOREIGN
-INNODB_FOREIGN_COLS
-INNODB_FT_BEING_DELETED
-INNODB_FT_CONFIG
-INNODB_FT_DEFAULT_STOPWORD
-INNODB_FT_DELETED
-INNODB_FT_INDEX_CACHE
-INNODB_FT_INDEX_TABLE
-INNODB_INDEXES
-INNODB_METRICS
-INNODB_SESSION_TEMP_TABLESPACES
-INNODB_TABLES
-INNODB_TABLESPACES
-INNODB_TABLESPACES_BRIEF
-INNODB_TABLESTATS
-INNODB_TEMP_TABLE_INFO
-INNODB_TRX
-INNODB_VIRTUAL
-KEYWORDS
-KEY_COLUMN_USAGE
-OPTIMIZER_TRACE
-PARAMETERS
-PARTITIONS
-PLUGINS
-PROCESSLIST
-PROFILING
-REFERENTIAL_CONSTRAINTS
-RESOURCE_GROUPS
-ROLE_COLUMN_GRANTS
-ROLE_ROUTINE_GRANTS
-ROLE_TABLE_GRANTS
-ROUTINES
-SCHEMATA
-SCHEMATA_EXTENSIONS
-SCHEMA_PRIVILEGES
-STATISTICS
-ST_GEOMETRY_COLUMNS
-ST_SPATIAL_REFERENCE_SYSTEMS
-ST_UNITS_OF_MEASURE
-TABLES
-TABLESPACES
-TABLESPACES_EXTENSIONS
-TABLES_EXTENSIONS
-TABLE_CONSTRAINTS
-TABLE_CONSTRAINTS_EXTENSIONS
-TABLE_PRIVILEGES
-TABLE_STATISTICS
-TEMPORARY_TABLES
-THREAD_STATISTICS
-TRIGGERS
-USER_ATTRIBUTES
-USER_PRIVILEGES
-USER_STATISTICS
-VIEWS
-VIEW_ROUTINE_USAGE
-VIEW_TABLE_USAGE
+ERROR 1045 (28000): ProxySQL Error: Access denied for user 'monitor'@'10.182.192.77' (using password: YES)
+command terminated with exit code 1
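
Note on reading the trace: every kubectl call in this log goes through the suite's kubectl_bin retry wrapper, which is why each step is bracketed by a pair of mktemp files (LAST_OUT/LAST_ERR), a 'seq 0 2' retry loop, and a final cat/rm of those files. The sketch below is reconstructed from the xtrace output above purely as a reading aid; the real helper lives in the repository's e2e-tests sources and may differ in details such as redirection and sleep handling.

    # Minimal sketch of the retry pattern visible in the trace above (assumed,
    # not the suite's exact function): run kubectl up to three times, capture
    # stdout/stderr in temp files, print them, and return the last exit status.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0      # the trace shows an immediate retry on failure
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }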
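
The run ends with the monitor-user check failing: both my-cluster-secrets and internal-some-name hold the same value for monitor, yet the SHOW TABLES comparison against some-name-proxysql returns ERROR 1045 from ProxySQL, so the diff above records an authentication failure rather than a schema mismatch. A minimal way to re-check those credentials by hand, using the names from this run (namespace users-12846, cluster some-name) and assuming the pxc-client container provides a mysql binary, as the test itself relies on:

    # Hypothetical manual re-check of the failing step; reduced to SELECT 1
    # because the reported error is an authentication failure, not a query error.
    ns=users-12846
    pass=$(kubectl -n "$ns" get secrets/internal-some-name \
        --template='{{.data.monitor}}' | base64 --decode)
    client=$(kubectl -n "$ns" get pods --selector=name=pxc-client \
        -o jsonpath='{.items[0].metadata.name}')
    kubectl -n "$ns" exec "$client" -c pxc-client -- \
        mysql -h some-name-proxysql -P3306 -umonitor -p"$pass" -e 'SELECT 1;'

If this also fails while the same login works directly against some-name-pxc-0.some-name-pxc, the denial most likely comes from the ProxySQL side (its runtime mysql_users entry for monitor) rather than from the Kubernetes secrets, which the log shows are consistent.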