Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/logs/monitoring-2-0-8-0.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-22706 + local ns=monitoring-2-0-22706 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-17546 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.iNnkj4Jom8 ++ mktemp + local LAST_ERR=/tmp/tmp.JCPSdxaENs + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iNnkj4Jom8 perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-17546 namespace + cat /tmp/tmp.JCPSdxaENs + rm /tmp/tmp.iNnkj4Jom8 /tmp/tmp.JCPSdxaENs + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.I7ESubQSFk ++ mktemp + local LAST_ERR=/tmp/tmp.fV0y9eq9UH + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.I7ESubQSFk No resources found + cat /tmp/tmp.fV0y9eq9UH + rm /tmp/tmp.I7ESubQSFk /tmp/tmp.fV0y9eq9UH + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.VP3TannDbK ++ mktemp + local LAST_ERR=/tmp/tmp.YbM2Ptt2oB + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VP3TannDbK No resources found + cat /tmp/tmp.YbM2Ptt2oB + rm /tmp/tmp.VP3TannDbK /tmp/tmp.YbM2Ptt2oB + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ 
grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.IMpxX0vgKs + local LAST_OUT=/tmp/tmp.a4eEs8Hcps egrep: warning: egrep is obsolescent; using grep -E ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.xJLyoBBTN5 + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.CZyoKbM1Sd + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IMpxX0vgKs + cat /tmp/tmp.xJLyoBBTN5 + rm /tmp/tmp.IMpxX0vgKs /tmp/tmp.xJLyoBBTN5 + return 0 namespace "monitoring-2-0-17546" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.a4eEs8Hcps namespace "pxc-operator" deleted + cat /tmp/tmp.CZyoKbM1Sd + rm /tmp/tmp.a4eEs8Hcps /tmp/tmp.CZyoKbM1Sd + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.YLq06H6Xt6 ++ mktemp + local LAST_ERR=/tmp/tmp.ioxBZ7fAPR + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YLq06H6Xt6 namespace/pxc-operator created + cat /tmp/tmp.ioxBZ7fAPR + rm /tmp/tmp.YLq06H6Xt6 /tmp/tmp.ioxBZ7fAPR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y4YIa39n3S +++ mktemp ++ local LAST_ERR=/tmp/tmp.33ZapCc1yI ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Y4YIa39n3S ++ cat /tmp/tmp.33ZapCc1yI ++ rm /tmp/tmp.Y4YIa39n3S /tmp/tmp.33ZapCc1yI ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.YKyqvThqjW ++ mktemp + local LAST_ERR=/tmp/tmp.JSVKjthnmo + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YKyqvThqjW Context 
"gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5" modified. + cat /tmp/tmp.JSVKjthnmo + rm /tmp/tmp.YKyqvThqjW /tmp/tmp.JSVKjthnmo + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Km2UupXCQy ++ mktemp + local LAST_ERR=/tmp/tmp.Q3iDTntpcE + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Km2UupXCQy customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.Q3iDTntpcE + rm /tmp/tmp.Km2UupXCQy /tmp/tmp.Q3iDTntpcE + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.I7ewVtI163 ++ mktemp + local LAST_ERR=/tmp/tmp.6I9WctI7KG + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.I7ewVtI163 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.6I9WctI7KG + rm /tmp/tmp.I7ewVtI163 /tmp/tmp.6I9WctI7KG + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2200-89830e6d^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RGoPz3tHLG ++ mktemp + local LAST_ERR=/tmp/tmp.QhdlyrkQOw + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RGoPz3tHLG deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.QhdlyrkQOw + rm /tmp/tmp.RGoPz3tHLG /tmp/tmp.QhdlyrkQOw + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.CroxrORIsa ++ mktemp + local LAST_ERR=/tmp/tmp.xkxFAgMoe0 + local exit_status=0 ++ seq 0 2 + 
for i in $(seq 0 2) + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CroxrORIsa pod/percona-xtradb-cluster-operator-59745b97cd-z4ctj condition met + cat /tmp/tmp.xkxFAgMoe0 + rm /tmp/tmp.CroxrORIsa /tmp/tmp.xkxFAgMoe0 + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.9YNgobnqAX +++ mktemp ++ local LAST_ERR=/tmp/tmp.rNtpCxeX0d ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9YNgobnqAX ++ cat /tmp/tmp.rNtpCxeX0d ++ rm /tmp/tmp.9YNgobnqAX /tmp/tmp.rNtpCxeX0d ++ return 0 + wait_pod percona-xtradb-cluster-operator-59745b97cd-z4ctj 480 pxc-operator + local pod=percona-xtradb-cluster-operator-59745b97cd-z4ctj + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-59745b97cd-z4ctj ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-59745b97cd-z4ctj condition met waiting for pod/percona-xtradb-cluster-operator-59745b97cd-z4ctj to become Ready.Ok + sleep 3 + create_namespace monitoring-2-0-22706 + local namespace=monitoring-2-0-22706 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- 
cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-22706' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-22706 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-22706 + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.8Da5CfE9gf egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.4aX8m3MW97 + local LAST_ERR=/tmp/tmp.xZD4Dpa4vu + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.lBzi3PHYhn + local exit_status=0 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-22706 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8Da5CfE9gf + cat /tmp/tmp.xZD4Dpa4vu + rm /tmp/tmp.8Da5CfE9gf /tmp/tmp.xZD4Dpa4vu + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-22706 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-22706 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.4aX8m3MW97 + cat /tmp/tmp.lBzi3PHYhn Error from server (NotFound): namespaces "monitoring-2-0-22706" not found + rm /tmp/tmp.4aX8m3MW97 /tmp/tmp.lBzi3PHYhn + return 1 + : + wait_for_delete namespace/monitoring-2-0-22706 + local res=namespace/monitoring-2-0-22706 + echo -n 'waiting for namespace/monitoring-2-0-22706 to be deleted' waiting for namespace/monitoring-2-0-22706 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-22706" not found + desc 'create namespace monitoring-2-0-22706' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-22706 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-22706 ++ mktemp + local LAST_OUT=/tmp/tmp.iwNTXbGFK9 ++ mktemp + local LAST_ERR=/tmp/tmp.PpJgZqWidG + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-22706 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iwNTXbGFK9 namespace/monitoring-2-0-22706 created + cat /tmp/tmp.PpJgZqWidG + rm /tmp/tmp.iwNTXbGFK9 /tmp/tmp.PpJgZqWidG + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QMkLRNnvtU +++ mktemp ++ local LAST_ERR=/tmp/tmp.vCXjuDjqiz ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QMkLRNnvtU ++ cat /tmp/tmp.vCXjuDjqiz ++ rm /tmp/tmp.QMkLRNnvtU /tmp/tmp.vCXjuDjqiz ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5 --namespace=monitoring-2-0-22706 ++ mktemp + local LAST_OUT=/tmp/tmp.l2TdVDyRuX ++ mktemp + local LAST_ERR=/tmp/tmp.Q8ZGFSUYqG + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set 
+e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5 --namespace=monitoring-2-0-22706 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.l2TdVDyRuX Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2200-89830e6d-1-cluster5" modified. + cat /tmp/tmp.Q8ZGFSUYqG + rm /tmp/tmp.l2TdVDyRuX /tmp/tmp.Q8ZGFSUYqG + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4pEKCHNV9n ++ mktemp + local LAST_ERR=/tmp/tmp.DJizPoQwLQ + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4pEKCHNV9n secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.DJizPoQwLQ + rm /tmp/tmp.4pEKCHNV9n /tmp/tmp.DJizPoQwLQ + return 0 + deploy_helm monitoring-2-0-22706 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' -z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Oct 1 02:12:16 2025 NAMESPACE: monitoring-2-0-22706 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-22706.svc.cluster.local:443 login: admin password: admin + kubectl wait pod monitoring-0 --for=condition=Ready --timeout=420s pod/monitoring-0 condition met + kubectl_bin wait --for=condition=Ready pod/monitoring-0 --timeout=120s ++ mktemp + local LAST_OUT=/tmp/tmp.3DHXpFbaqN ++ mktemp + local LAST_ERR=/tmp/tmp.3iRkvoPpXJ + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=condition=Ready pod/monitoring-0 --timeout=120s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3DHXpFbaqN pod/monitoring-0 condition met + cat /tmp/tmp.3iRkvoPpXJ + rm /tmp/tmp.3DHXpFbaqN /tmp/tmp.3iRkvoPpXJ + return 0 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.Lj8C0MxH8u ++ mktemp + local LAST_ERR=/tmp/tmp.T6mZGeDSGt + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Lj8C0MxH8u + cat /tmp/tmp.T6mZGeDSGt + rm /tmp/tmp.Lj8C0MxH8u /tmp/tmp.T6mZGeDSGt + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cj26Xe6U4Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.i0V6iWrlCc ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Cj26Xe6U4Q ++ cat /tmp/tmp.i0V6iWrlCc ++ rm /tmp/tmp.Cj26Xe6U4Q /tmp/tmp.i0V6iWrlCc ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.Mao5ZnI6fn ++ mktemp + local LAST_ERR=/tmp/tmp.JCPe76lgCX + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Mao5ZnI6fn logger=settings t=2025-10-01T02:12:43.614896645Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2025-10-01T02:12:43.615016754Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2025-10-01T02:12:43.615025954Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2025-10-01T02:12:43.615029644Z level=info msg="Path Home" path=/usr/share/grafana logger=settings 
t=2025-10-01T02:12:43.615033744Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2025-10-01T02:12:43.615038684Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2025-10-01T02:12:43.615044624Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2025-10-01T02:12:43.615049764Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2025-10-01T02:12:43.615054574Z level=info msg="App mode production" logger=sqlstore t=2025-10-01T02:12:43.615116704Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2025-10-01T02:12:43.627670076Z level=info msg="Starting DB migrations" logger=migrator t=2025-10-01T02:12:43.631114543Z level=info msg="migrations completed" performed=0 skipped=452 duration=349.11µs logger=secrets t=2025-10-01T02:12:43.632289373Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2025-10-01T02:12:43.665554859Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2025-10-01T02:12:43.783893828Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2025-10-01T02:12:43.789265604Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2025-10-01T02:12:43.789462884Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2025-10-01T02:12:43.789493924Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2025-10-01T02:12:43.789522524Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2025-10-01T02:12:43.789551404Z level=warn msg="Permitting unsigned plugin. 
This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.loader t=2025-10-01T02:12:43.789643534Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2025-10-01T02:12:43.789688844Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2025-10-01T02:12:43.789727064Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2025-10-01T02:12:43.789758894Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2025-10-01T02:12:43.789803304Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2025-10-01T02:12:43.789814434Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2025-10-01T02:12:43.789821744Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2025-10-01T02:12:43.789827394Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2025-10-01T02:12:43.789833124Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2025-10-01T02:12:43.789839844Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2025-10-01T02:12:43.799606137Z level=warn msg="Plugin process is running with elevated privileges. This is not recommended" logger=plugin.loader t=2025-10-01T02:12:43.799625997Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2025-10-01T02:12:43.799633437Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel Admin password changed successfully ✔ + cat /tmp/tmp.JCPe76lgCX + rm /tmp/tmp.Mao5ZnI6fn /tmp/tmp.JCPe76lgCX + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.wlHEMXoeTq ++ mktemp + local LAST_ERR=/tmp/tmp.eruZs7YrlD + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wlHEMXoeTq secret/my-cluster-secrets created + cat /tmp/tmp.eruZs7YrlD + rm /tmp/tmp.wlHEMXoeTq /tmp/tmp.eruZs7YrlD + return 0 + 
apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/conf/client.yml + /usr/sbin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/sbin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/sbin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/sbin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2200-89830e6d#' + /usr/sbin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + local LAST_OUT=/tmp/tmp.uKVknzuflo ++ mktemp + local LAST_ERR=/tmp/tmp.yOvjQ0aMUz + local exit_status=0 + /usr/sbin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/sbin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ seq 0 2 + /usr/sbin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/sbin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/sbin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-22706~ + for i in $(seq 0 2) + set +e + kubectl apply -f - + /usr/sbin/sed -e 's#apply:.*#apply: Never#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uKVknzuflo deployment.apps/pxc-client created + cat /tmp/tmp.yOvjQ0aMUz + rm /tmp/tmp.uKVknzuflo /tmp/tmp.yOvjQ0aMUz + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/monitoring.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/monitoring.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/conf/monitoring.yml + /usr/sbin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/sbin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/sbin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/sbin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2200-89830e6d#' + /usr/sbin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/sbin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_OUT=/tmp/tmp.JdH0Vdblcn ++ mktemp + /usr/sbin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/sbin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/sbin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/sbin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-22706~ + /usr/sbin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.mVwqMilO0L + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JdH0Vdblcn 
perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.mVwqMilO0L + rm /tmp/tmp.JdH0Vdblcn /tmp/tmp.mVwqMilO0L + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QcDvUUNVuM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uIuxjPEjlA +++ local exit_status=0 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.QcDvUUNVuM +++ cat /tmp/tmp.uIuxjPEjlA +++ rm /tmp/tmp.QcDvUUNVuM /tmp/tmp.uIuxjPEjlA +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-22706 ++ mktemp + local LAST_OUT=/tmp/tmp.ePnqDx78DD ++ mktemp + local LAST_ERR=/tmp/tmp.fMf5P4rOa6 + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-22706 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-22706 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-22706 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.ePnqDx78DD + cat /tmp/tmp.fMf5P4rOa6 error: no matching resources found + rm /tmp/tmp.ePnqDx78DD /tmp/tmp.fMf5P4rOa6 + return 1 + true + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in $(seq 0 $last_pod) + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in $(seq 0 $last_pod) + wait_pod 
monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in $(seq 0 $last_pod) + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in $(seq 0 $last_pod) + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.Co8b7ifSOK +++ mktemp ++ local LAST_ERR=/tmp/tmp.bevTHYhQLB ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Co8b7ifSOK ++ cat /tmp/tmp.bevTHYhQLB ++ rm /tmp/tmp.Co8b7ifSOK /tmp/tmp.bevTHYhQLB ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IxALhHou6N +++ mktemp ++ local LAST_ERR=/tmp/tmp.bkhnmMdSXJ ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IxALhHou6N ++ cat /tmp/tmp.bkhnmMdSXJ ++ rm /tmp/tmp.IxALhHou6N /tmp/tmp.bkhnmMdSXJ ++ return 0 + client_pod=pxc-client-59944c5bbf-p7hmx + wait_pod pxc-client-59944c5bbf-p7hmx + local pod=pxc-client-59944c5bbf-p7hmx + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-p7hmx ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/pxc-client-59944c5bbf-p7hmx condition met waiting for 
pod/pxc-client-59944c5bbf-p7hmx to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o2N1wPTaJf +++ mktemp ++ local LAST_ERR=/tmp/tmp.gcA1FGdxpZ ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.o2N1wPTaJf ++ cat /tmp/tmp.gcA1FGdxpZ ++ rm /tmp/tmp.o2N1wPTaJf /tmp/tmp.gcA1FGdxpZ ++ return 0 + client_pod=pxc-client-59944c5bbf-p7hmx + wait_pod pxc-client-59944c5bbf-p7hmx + local pod=pxc-client-59944c5bbf-p7hmx + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-p7hmx ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/pxc-client-59944c5bbf-p7hmx condition met waiting for pod/pxc-client-59944c5bbf-p7hmx to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in $(seq 0 $((size - 1))) + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4KRVX1rlpY +++ mktemp ++ local LAST_ERR=/tmp/tmp.sZ2ueZ5vH3 ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4KRVX1rlpY ++ cat /tmp/tmp.sZ2ueZ5vH3 ++ rm /tmp/tmp.4KRVX1rlpY /tmp/tmp.sZ2ueZ5vH3 ++ return 0 + client_pod=pxc-client-59944c5bbf-p7hmx + wait_pod pxc-client-59944c5bbf-p7hmx + local pod=pxc-client-59944c5bbf-p7hmx + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-p7hmx ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/pxc-client-59944c5bbf-p7hmx condition met waiting for pod/pxc-client-59944c5bbf-p7hmx to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup 
.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.9SUCx2kV4U/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.9SUCx2kV4U/select-1.sql + for i in $(seq 0 $((size - 1))) + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kV34d0ISsN +++ mktemp ++ local LAST_ERR=/tmp/tmp.6DmEVnjv54 ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kV34d0ISsN ++ cat /tmp/tmp.6DmEVnjv54 ++ rm /tmp/tmp.kV34d0ISsN /tmp/tmp.6DmEVnjv54 ++ return 0 + client_pod=pxc-client-59944c5bbf-p7hmx + wait_pod pxc-client-59944c5bbf-p7hmx + local pod=pxc-client-59944c5bbf-p7hmx + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-p7hmx ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/pxc-client-59944c5bbf-p7hmx condition met waiting for pod/pxc-client-59944c5bbf-p7hmx to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.9SUCx2kV4U/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.9SUCx2kV4U/select-1.sql + for i in $(seq 0 $((size - 1))) + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dVtp4LXqwV +++ mktemp ++ local LAST_ERR=/tmp/tmp.1b5PjMHtK6 ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dVtp4LXqwV ++ cat /tmp/tmp.1b5PjMHtK6 ++ rm /tmp/tmp.dVtp4LXqwV /tmp/tmp.1b5PjMHtK6 ++ return 0 + client_pod=pxc-client-59944c5bbf-p7hmx + wait_pod pxc-client-59944c5bbf-p7hmx + local pod=pxc-client-59944c5bbf-p7hmx + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-p7hmx ++ /usr/sbin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' egrep: warning: egrep is obsolescent; using grep -E + local container= + set +o xtrace pod/pxc-client-59944c5bbf-p7hmx condition met waiting for pod/pxc-client-59944c5bbf-p7hmx to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.9SUCx2kV4U/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2200/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.9SUCx2kV4U/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ kubectl_bin exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x70QkqjvkN egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.9YoVPLwPrZ ++ local exit_status=0 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.x70QkqjvkN ++ cat /tmp/tmp.9YoVPLwPrZ Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.x70QkqjvkN /tmp/tmp.9YoVPLwPrZ ++ return 0 + '[' '' ']' + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qsJDuUV3To +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UFtv1hj7nb ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.qsJDuUV3To ++++ cat /tmp/tmp.UFtv1hj7nb jq: error (at :58): Cannot iterate over null (null) ++++ rm /tmp/tmp.qsJDuUV3To /tmp/tmp.UFtv1hj7nb ++++ return 0 +++ local hostname= +++ '[' -n '' -a '' '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.r9xCAB95O0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.CD6i7lW2tl ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.r9xCAB95O0 jq: error (at :58): Cannot iterate over null (null) ++++ cat /tmp/tmp.CD6i7lW2tl ++++ rm /tmp/tmp.r9xCAB95O0 /tmp/tmp.CD6i7lW2tl ++++ return 0 +++ local ip= +++ '[' -n '' -a '' '!=' null ']' +++ exit 1 ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@/graph/api/auth/keys curl: (3) URL rejected: No host part in the URL + API_KEY= + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": }}' ++ mktemp + local LAST_OUT=/tmp/tmp.Qg3ZQNPGW5 ++ mktemp + local LAST_ERR=/tmp/tmp.lQc4Hu9df1 + local exit_status=0 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": }}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": }}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in $(seq 0 2) + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": 
}}'
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.Qg3ZQNPGW5
+ cat /tmp/tmp.lQc4Hu9df1
Error from server (BadRequest): error decoding patch: invalid character '}' looking for beginning of value
+ rm /tmp/tmp.Qg3ZQNPGW5 /tmp/tmp.lQc4Hu9df1
+ return 1
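
Note on the failure above: monitoring-service had no LoadBalancer ingress when get_service_endpoint ran (both the .hostname and .ip lookups hit "jq: error ... Cannot iterate over null"), so the endpoint resolved to an empty string, curl was invoked without a host ("URL rejected: No host part in the URL"), API_KEY stayed empty, and the secret patch became the invalid JSON '{"stringData": {"pmmserverkey": }}', which the API server rejected with the decoding error. A sketch of a fail-fast guard for this step follows; it is an assumption about how the step could be hardened, not the code that ran:

    # Sketch of a fail-fast guard (assumption): resolve the PMM endpoint first,
    # stop while it is empty, and never patch the secret with an empty API key.
    endpoint=$(kubectl get service/monitoring-service -o json |
        jq -r '.status.loadBalancer.ingress[0].hostname // .status.loadBalancer.ingress[0].ip // empty')
    if [ -z "$endpoint" ]; then
        echo "monitoring-service has no LoadBalancer ingress yet" >&2
        exit 1
    fi
    API_KEY=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator", "role": "Admin"}' \
        "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)
    if [ -z "$API_KEY" ] || [ "$API_KEY" = "null" ]; then
        echo "failed to create PMM API key" >&2
        exit 1
    fi
    kubectl patch secret my-cluster-secrets --type merge \
        --patch "{\"stringData\": {\"pmmserverkey\": \"${API_KEY}\"}}"

With a guard like this the run would stop at the real cause (the missing external endpoint) instead of retrying an unparseable patch three times.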
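
For reference when reading the trace: the kubectl_bin wrapper that appears throughout captures stdout and stderr into mktemp files, retries the underlying kubectl call up to three times, then prints both streams and propagates the exit status. A minimal, hypothetical reconstruction (the real helper lives in the suite's functions file and may differ in details):

    # Hypothetical reconstruction of the kubectl_bin retry wrapper implied by the trace.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # capture both streams per attempt
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0                               # the trace shows no back-off between retries
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }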
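
The repeated "error: resource(s) were provided, but no name was specified" messages in destroy_chaos_mesh come from piping an empty grep result into kubectl delete; the script tolerates them with ':'. A hedged alternative, assumed rather than taken from the suite, that skips the delete when nothing matches (the same pattern would apply to the crd, clusterrole and clusterrolebinding deletes):

    # xargs -r runs kubectl delete only when grep actually matched something.
    kubectl get mutatingwebhookconfiguration 2>/dev/null |
        grep chaos-mesh | awk '{print $1}' |
        xargs -r timeout 30 kubectl delete mutatingwebhookconfiguration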
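
For readability, the deploy_operator phase traced above reduces to the pipeline below. Paths are shortened to the repo-relative deploy/ files and the image tag is the one used in this run:

    # CRDs applied server-side; the operator Deployment gets the PR image,
    # failureThreshold 10, telemetry disabled and verbose logging.
    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
    sed -e 's^namespace: .*^namespace: pxc-operator^' deploy/cw-rbac.yaml | kubectl apply -f -
    cat deploy/cw-operator.yaml |
        sed -e 's^failureThreshold: .*^failureThreshold: 10^' |
        sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2200-89830e6d^' |
        yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - |
        yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - |
        kubectl apply -f -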
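
The "install PMM Server" phase, with the retry and temp-file wrappers stripped away, amounts to roughly the following (chart URL, repository and image tags as in this run):

    helm repo add percona https://percona.github.io/percona-helm-charts/
    helm repo update
    helm uninstall monitoring || true          # tolerated when no previous release exists
    helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest \
        https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
    kubectl wait pod monitoring-0 --for=condition=Ready --timeout=420s
    # wait for the embedded postgres process, then reset the Grafana admin password
    kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'
    kubectl exec monitoring-0 -- bash -c \
        'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin'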
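
The data-write check reads the root password straight from the cluster secret and runs SQL through the pxc-client pod, then compares each member's SELECT output against compare/select-1.sql. A condensed sketch reconstructed from the trace; the exact mysql invocation inside run_mysql is not visible here, so that part is an assumption:

    root_pass=$(kubectl get secrets/my-cluster-secrets --template='{{.data.root}}' | base64 --decode)
    client_pod=$(kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}')
    # write a row through HAProxy, then read it back from a PXC member (repeated for monitoring-pxc-0..2)
    kubectl exec "$client_pod" -- bash -c \
        "mysql -h monitoring-haproxy -uroot -p'$root_pass' -P3306 -e 'CREATE DATABASE IF NOT EXISTS myApp; CREATE TABLE IF NOT EXISTS myApp.myApp (id int PRIMARY KEY); INSERT myApp.myApp (id) VALUES (100500)'"
    kubectl exec "$client_pod" -- bash -c \
        "mysql -h monitoring-pxc-0.monitoring-pxc -uroot -p'$root_pass' -P3306 -sN -e 'SELECT * from myApp.myApp'"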