Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/logs/storage-8-0.log
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ '[' -n '' ']'
+ main
+ create_infra storage-24752
+ local ns=storage-24752
+ '[' -n pxc-operator ']'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.mU3fILH6rW
++ mktemp
+ local LAST_ERR=/tmp/tmp.HGw7euyTyS
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.mU3fILH6rW
No resources found
+ cat /tmp/tmp.HGw7euyTyS
+ rm /tmp/tmp.mU3fILH6rW /tmp/tmp.HGw7euyTyS
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.jNcO9cZO5M
++ mktemp
+ local LAST_ERR=/tmp/tmp.QoCwQ5m3NC
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.jNcO9cZO5M
+ cat /tmp/tmp.QoCwQ5m3NC
error: the server doesn't have a resource type "pxc-backup"
+ rm /tmp/tmp.jNcO9cZO5M /tmp/tmp.QoCwQ5m3NC
+ return 1
+ :
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.znjvkMYk89
++ mktemp
+ local LAST_ERR=/tmp/tmp.PWGSDfqirv
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.znjvkMYk89
+ cat /tmp/tmp.PWGSDfqirv
error: the server doesn't have a resource type "pxc-restore"
+ rm /tmp/tmp.znjvkMYk89 /tmp/tmp.PWGSDfqirv
+ return 1
+ :
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get
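
# NOTE: every kubectl_bin call in this trace captures stdout/stderr into mktemp
# files and retries up to three times; a minimal reconstruction (hypothetical
# sketch -- the real helper lives in the e2e-tests function library) looks like:
#
#   kubectl_bin() {
#       local LAST_OUT LAST_ERR exit_status=0
#       LAST_OUT=$(mktemp); LAST_ERR=$(mktemp)
#       for i in $(seq 0 2); do                  # three attempts, as traced above
#           set +e
#           kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
#           exit_status=$?
#           set -e
#           [ "$exit_status" -eq 0 ] && break
#           sleep 0                              # the trace shows no real back-off
#       done
#       cat "$LAST_OUT"; cat "$LAST_ERR" >&2
#       rm "$LAST_OUT" "$LAST_ERR"
#       return "$exit_status"
#   }
#
# The stray "kubectl patch pxc -n sh" error above is xargs running `sh -xc` on
# empty input: with no rows from `kubectl get pxc`, $0 defaults to "sh" and $1
# is empty, so the patch is issued without a resource name. The `+ :` no-op
# after each failing call is how the harness deliberately swallows such errors.
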
ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.zl4Au5EDW6 + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.LYtA0cbxnL ++ mktemp + local LAST_ERR=/tmp/tmp.YqdQxQxg0k + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Uks1bxZg2g + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zl4Au5EDW6 + cat /tmp/tmp.YqdQxQxg0k + rm /tmp/tmp.zl4Au5EDW6 /tmp/tmp.YqdQxQxg0k + return 0 namespace "storage-22025" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LYtA0cbxnL namespace "pxc-operator" deleted + cat /tmp/tmp.Uks1bxZg2g + rm /tmp/tmp.LYtA0cbxnL /tmp/tmp.Uks1bxZg2g + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.5UoL3BK0A2 ++ mktemp + local LAST_ERR=/tmp/tmp.95SEtvdzol + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5UoL3BK0A2 namespace/pxc-operator created + cat /tmp/tmp.95SEtvdzol + rm /tmp/tmp.5UoL3BK0A2 /tmp/tmp.95SEtvdzol + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.An3dndWr2t +++ mktemp ++ local LAST_ERR=/tmp/tmp.wBKjdahHxg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.An3dndWr2t ++ cat 
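
# NOTE: destroy_chaos_mesh is a best-effort sweep: every candidate name list is
# computed with get | grep | awk, and an empty list makes `kubectl delete` fail
# with "no name was specified", which is ignored. A sketch of the traced logic:
#
#   for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration; do
#       timeout 30 kubectl delete "$kind" \
#           $(kubectl get "$kind" | grep chaos-mesh | awk '{print $1}') || :
#   done
#   timeout 30 kubectl delete crd \
#       $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
#
# The same pattern then sweeps leftover test namespaces, filtering system ones:
#
#   kubectl get ns \
#       | egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' \
#       | awk '{print $1}' | xargs kubectl delete ns
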
/tmp/tmp.wBKjdahHxg ++ rm /tmp/tmp.An3dndWr2t /tmp/tmp.wBKjdahHxg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tEkns6DFGc ++ mktemp + local LAST_ERR=/tmp/tmp.nKrhjyXvra + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tEkns6DFGc Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9" modified. + cat /tmp/tmp.nKrhjyXvra + rm /tmp/tmp.tEkns6DFGc /tmp/tmp.nKrhjyXvra + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.m4FVCXdVi7 ++ mktemp + local LAST_ERR=/tmp/tmp.yro5yQ5FTu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.m4FVCXdVi7 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.yro5yQ5FTu + rm /tmp/tmp.m4FVCXdVi7 /tmp/tmp.yro5yQ5FTu + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.O3NGAsuSq1 ++ mktemp + local LAST_ERR=/tmp/tmp.RnVcjwsWGr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.O3NGAsuSq1 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.RnVcjwsWGr + rm /tmp/tmp.O3NGAsuSq1 /tmp/tmp.RnVcjwsWGr + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.gE7B62CRAt ++ mktemp + local LAST_ERR=/tmp/tmp.aM0GqhsAOW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gE7B62CRAt deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.aM0GqhsAOW + rm /tmp/tmp.gE7B62CRAt /tmp/tmp.aM0GqhsAOW + 
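
# NOTE: the CRD is applied with `kubectl apply --server-side --force-conflicts`
# so repeated CI runs can take over field ownership. The operator Deployment is
# rewritten in-flight before apply; a condensed sketch of the traced pipeline
# (sed/yq expressions copied from the log, paths abbreviated):
#
#   cat deploy/cw-rbac.yaml \
#     | sed -e 's^namespace: .*^namespace: pxc-operator^' | kubectl apply -f -
#
#   cat deploy/cw-operator.yaml \
#     | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7^' \
#     | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
#     | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
#         | select(.name == "percona-xtradb-cluster-operator").env[]
#         | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
#     | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
#         | select(.name == "percona-xtradb-cluster-operator").env[]
#         | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
#     | kubectl apply -f -
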
return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.lPfF5UPrqI ++ mktemp + local LAST_ERR=/tmp/tmp.apgzAqEa5s + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lPfF5UPrqI pod/percona-xtradb-cluster-operator-859595f865-gjvj2 condition met + cat /tmp/tmp.apgzAqEa5s + rm /tmp/tmp.lPfF5UPrqI /tmp/tmp.apgzAqEa5s + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.WOPH9Fx6Ow +++ mktemp ++ local LAST_ERR=/tmp/tmp.trOf5LAy1X ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WOPH9Fx6Ow ++ cat /tmp/tmp.trOf5LAy1X ++ rm /tmp/tmp.WOPH9Fx6Ow /tmp/tmp.trOf5LAy1X ++ return 0 + wait_pod percona-xtradb-cluster-operator-859595f865-gjvj2 480 pxc-operator + local pod=percona-xtradb-cluster-operator-859595f865-gjvj2 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-859595f865-gjvj2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-859595f865-gjvj2 condition met waiting for pod/percona-xtradb-cluster-operator-859595f865-gjvj2 to become Ready.Ok + sleep 3 + create_namespace storage-24752 + local namespace=storage-24752 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get 
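
# NOTE: wait_pod infers which container to probe from the pod name alone; the
# traced sed/egrep pair reduces a name to "pxc" or "proxysql" and yields an
# empty string for anything else, in which case the whole pod's Ready condition
# is checked instead. Sketch (the `|| true` is an assumption to keep it
# set -e safe):
#
#   container=$(echo "$pod" \
#       | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' \
#       | egrep '^(pxc|proxysql)$' || true)
#   # emptydir-pxc-1                                   -> pxc
#   # emptydir-proxysql-0                              -> proxysql
#   # percona-xtradb-cluster-operator-859595f865-gjvj2 -> ""
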
clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces storage-24752' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces storage-24752 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace storage-24752 + awk '{print$1}' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.zGfa6iL4gA ++ mktemp + local LAST_OUT=/tmp/tmp.5gT8xuasSD ++ mktemp + local LAST_ERR=/tmp/tmp.R64RVjm0HR + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Lq70tK64k7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace storage-24752 + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace storage-24752 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zGfa6iL4gA + cat /tmp/tmp.R64RVjm0HR + rm /tmp/tmp.zGfa6iL4gA /tmp/tmp.R64RVjm0HR + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.5gT8xuasSD + cat /tmp/tmp.Lq70tK64k7 Error from server (NotFound): namespaces "storage-24752" not found + rm /tmp/tmp.5gT8xuasSD /tmp/tmp.Lq70tK64k7 + return 1 + : + wait_for_delete namespace/storage-24752 + local res=namespace/storage-24752 + echo -n 'waiting for namespace/storage-24752 to be deleted' waiting for namespace/storage-24752 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "storage-24752" not found + desc 'create namespace storage-24752' + set +o xtrace ----------------------------------------------------------------------------------- create namespace storage-24752 ----------------------------------------------------------------------------------- + kubectl_bin create namespace storage-24752 ++ mktemp + local LAST_OUT=/tmp/tmp.9vnj31dkeI ++ mktemp + local LAST_ERR=/tmp/tmp.dsByVNpxGs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace storage-24752 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9vnj31dkeI namespace/storage-24752 created + cat /tmp/tmp.dsByVNpxGs + rm /tmp/tmp.9vnj31dkeI /tmp/tmp.dsByVNpxGs + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.JymIYXitOX +++ mktemp ++ local LAST_ERR=/tmp/tmp.RuNSzrCTEv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JymIYXitOX ++ cat /tmp/tmp.RuNSzrCTEv ++ rm /tmp/tmp.JymIYXitOX /tmp/tmp.RuNSzrCTEv ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9 --namespace=storage-24752 ++ mktemp + local 
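
# NOTE: recreating the test namespace is made idempotent: deletion errors are
# tolerated (the NotFound above is expected on a fresh cluster), wait_for_delete
# polls until the namespace is really gone, then it is created and selected. A
# simplified sketch (the polling loop is an assumption, not the verbatim helper):
#
#   kubectl delete namespace "$ns" || :
#   until kubectl get namespace "$ns" 2>&1 | grep -q NotFound; do
#       sleep 1
#   done
#   kubectl create namespace "$ns"
#   kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
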
LAST_OUT=/tmp/tmp.DFUIYxoQVb ++ mktemp + local LAST_ERR=/tmp/tmp.9PZsgMSin7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9 --namespace=storage-24752 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DFUIYxoQVb Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster9" modified. + cat /tmp/tmp.9PZsgMSin7 + rm /tmp/tmp.DFUIYxoQVb /tmp/tmp.9PZsgMSin7 + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7PCa3ogTNB ++ mktemp + local LAST_ERR=/tmp/tmp.1GWnuTDSSh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7PCa3ogTNB secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.1GWnuTDSSh + rm /tmp/tmp.7PCa3ogTNB /tmp/tmp.1GWnuTDSSh + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + local LAST_OUT=/tmp/tmp.hZEPl0B8Sg + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_ERR=/tmp/tmp.hFjRb4T3w5 + local exit_status=0 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ seq 0 2 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-24752~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hZEPl0B8Sg deployment.apps/pxc-client created + cat /tmp/tmp.hFjRb4T3w5 + rm /tmp/tmp.hZEPl0B8Sg /tmp/tmp.hFjRb4T3w5 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml -f 
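
# NOTE: cat_config pins every image tag before the manifest reaches kubectl; the
# sed stages appear shuffled in this trace because each pipeline stage is traced
# as it starts, not in pipe order. Condensed sketch (expressions from the log):
#
#   cat "$config" \
#     | sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
#     | sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' \
#     | sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
#     | sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
#     | sed -e 's~minio-service.#namespace~minio-service.storage-24752~' \
#     | kubectl apply -f -
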
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath-helper.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Sn4W91uvPi ++ mktemp + local LAST_ERR=/tmp/tmp.9bQEu2L6ZI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath-helper.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Sn4W91uvPi secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created daemonset.apps/hostpath-helper created + cat /tmp/tmp.9bQEu2L6ZI + rm /tmp/tmp.Sn4W91uvPi /tmp/tmp.9bQEu2L6ZI + return 0 + desc 'check emptydir' + set +o xtrace ----------------------------------------------------------------------------------- check emptydir ----------------------------------------------------------------------------------- + check_cr_config emptydir + local cluster=emptydir + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + spinup_pxc emptydir /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml + local cluster=emptydir + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.PhYngNC4Dc ++ mktemp + local LAST_ERR=/tmp/tmp.x2lKMyhNxM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PhYngNC4Dc secret/my-cluster-secrets unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.x2lKMyhNxM + rm /tmp/tmp.PhYngNC4Dc /tmp/tmp.x2lKMyhNxM + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.ysHeEZ7ez9 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-24752~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: 
perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.vcrNkXQ2P5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ysHeEZ7ez9 deployment.apps/pxc-client unchanged + cat /tmp/tmp.vcrNkXQ2P5 + rm /tmp/tmp.ysHeEZ7ez9 /tmp/tmp.vcrNkXQ2P5 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + local LAST_OUT=/tmp/tmp.K20cdvKZsl + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-24752~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.DUYCKK1DWE + local exit_status=0 + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.K20cdvKZsl perconaxtradbcluster.pxc.percona.com/emptydir created + cat /tmp/tmp.DUYCKK1DWE + rm /tmp/tmp.K20cdvKZsl /tmp/tmp.DUYCKK1DWE + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy emptydir ++ local target_cluster=emptydir +++ kubectl_bin get pxc emptydir -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qzvBPkkwcx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KSOg7gsKMF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc emptydir -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.qzvBPkkwcx +++ cat /tmp/tmp.KSOg7gsKMF +++ rm /tmp/tmp.qzvBPkkwcx /tmp/tmp.KSOg7gsKMF +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc emptydir -o 
'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.68VgTfdJ3i ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3LTu9WUhCj +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc emptydir -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.68VgTfdJ3i +++ cat /tmp/tmp.3LTu9WUhCj +++ rm /tmp/tmp.68VgTfdJ3i /tmp/tmp.3LTu9WUhCj +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo emptydir-proxysql ++ return + local proxy=emptydir-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 ++ mktemp + local LAST_OUT=/tmp/tmp.4HXFaKTIUX ++ mktemp + local LAST_ERR=/tmp/tmp.JJycLAET2H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.4HXFaKTIUX + cat /tmp/tmp.JJycLAET2H error: no matching resources found + rm /tmp/tmp.4HXFaKTIUX /tmp/tmp.JJycLAET2H + return 1 + true + wait_for_running emptydir-proxysql 1 + local name=emptydir-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod emptydir-proxysql-0 480 + local pod=emptydir-proxysql-0 + local max_retry=480 + local ns= ++ echo emptydir-proxysql-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=proxysql + set +o xtrace pod/emptydir-proxysql-0 condition met waiting for pod/emptydir-proxysql-0 to become Ready.Ok + wait_for_running emptydir-pxc 3 + local name=emptydir-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod emptydir-pxc-0 480 + local pod=emptydir-pxc-0 + local max_retry=480 + local ns= ++ echo emptydir-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/emptydir-pxc-0 condition met waiting for pod/emptydir-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod emptydir-pxc-1 480 + local pod=emptydir-pxc-1 + local max_retry=480 + local ns= ++ echo emptydir-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace 
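
# NOTE: get_proxy decides where clients connect by reading the CR spec; for the
# emptydir cluster .spec.haproxy.enabled is empty and .spec.proxysql.enabled is
# true, so emptydir-proxysql is chosen. Sketch (the haproxy branch name is an
# assumption; the proxysql path is what the trace shows):
#
#   if [ "$(kubectl get pxc "$cluster" -o 'jsonpath={.spec.haproxy.enabled}')" = "true" ]; then
#       proxy="$cluster-haproxy"
#   elif [ "$(kubectl get pxc "$cluster" -o 'jsonpath={.spec.proxysql.enabled}')" = "true" ]; then
#       proxy="$cluster-proxysql"
#   fi
#
# The three failed waits on app.kubernetes.io/instance=monitoring above are
# expected here (no PMM pods exist in this test) and are swallowed by `+ true`.
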
pod/emptydir-pxc-1 condition met waiting for pod/emptydir-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod emptydir-pxc-2 480 + local pod=emptydir-pxc-2 + local max_retry=480 + local ns= ++ echo emptydir-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/emptydir-pxc-2 condition met waiting for pod/emptydir-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc emptydir -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.pMwuoa9Hk7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OVaUZMxumh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pMwuoa9Hk7 ++ cat /tmp/tmp.OVaUZMxumh ++ rm /tmp/tmp.pMwuoa9Hk7 /tmp/tmp.OVaUZMxumh ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nsVxqbeejW +++ mktemp ++ local LAST_ERR=/tmp/tmp.2jvIive5s1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nsVxqbeejW ++ cat /tmp/tmp.2jvIive5s1 ++ rm /tmp/tmp.nsVxqbeejW /tmp/tmp.2jvIive5s1 ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h emptydir-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v5sEyUMfcD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z52bRPUlv6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.v5sEyUMfcD ++ cat 
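
# NOTE: the root password is read straight out of the cluster Secret, exactly as
# traced, and every query is then run from the pxc-client pod. The exec form is
# an assumption (run_mysql is not fully expanded in this trace):
#
#   root_pass=$(kubectl get secrets/my-cluster-secrets --template='{{.data.root}}' | base64 --decode)
#   kubectl exec "$client_pod" -- bash -c \
#       "mysql -h emptydir-proxysql -uroot -p'$root_pass' -P3306 -e 'INSERT myApp.myApp (id) VALUES (100500)'"
#
# The message 'Defaulted container "pxc-client" out of: pxc-client, backup' is
# kubectl picking the first container because none was named with -c.
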
/tmp/tmp.Z52bRPUlv6 ++ rm /tmp/tmp.v5sEyUMfcD /tmp/tmp.Z52bRPUlv6 ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-0.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EMbMIHSaWZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tdw3mfPfU1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EMbMIHSaWZ ++ cat /tmp/tmp.Tdw3mfPfU1 ++ rm /tmp/tmp.EMbMIHSaWZ /tmp/tmp.Tdw3mfPfU1 ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-1.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.akVMdB6Rvk +++ mktemp ++ local LAST_ERR=/tmp/tmp.hs9i6yNqde ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.akVMdB6Rvk ++ cat /tmp/tmp.hs9i6yNqde ++ rm /tmp/tmp.akVMdB6Rvk /tmp/tmp.hs9i6yNqde ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h emptydir-pxc-2.emptydir-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4RrAxK5N3S +++ mktemp ++ local LAST_ERR=/tmp/tmp.zm4efgaXua ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4RrAxK5N3S ++ cat /tmp/tmp.zm4efgaXua ++ rm /tmp/tmp.4RrAxK5N3S /tmp/tmp.zm4efgaXua ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql ++ is_keyring_plugin_in_use emptydir ++ local cluster=emptydir ++ kubectl_bin exec -it emptydir-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.thXjZ0VPI5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GVac10z4KS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it emptydir-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.thXjZ0VPI5 ++ cat /tmp/tmp.GVac10z4KS Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.thXjZ0VPI5 /tmp/tmp.GVac10z4KS ++ return 0 + '[' '' ']' + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/emptydir-pxc + local resource=statefulset/emptydir-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc.yml + local new_result=/tmp/tmp.KzCRvkl1op/statefulset_emptydir-pxc.yml + desc 'compare statefulset/emptydir-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/emptydir-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml 
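
# NOTE: version_gt compares the detected Kubernetes minor version against a
# threshold with bc; compare_kubectl uses it to pick the newest existing
# version-suffixed expectation file (here -k129 is tested first but absent, so
# statefulset_emptydir-pxc-k127.yml is used on this 1.31 cluster). Sketch
# (the KUBE_VERSION variable name is an assumption):
#
#   version_gt() {
#       # true when the running cluster version is >= the desired one
#       [ "$(echo "$KUBE_VERSION >= $1" | bc -l)" -eq 1 ]
#   }
#   version_gt 1.33   # echo '1.31 >= 1.33' | bc -l -> 0 -> false
#   version_gt 1.29   # echo '1.31 >= 1.29' | bc -l -> 1 -> true
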
']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/emptydir-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("storage-24752", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.CrgJhH0EID ++ mktemp + local LAST_ERR=/tmp/tmp.fezbJHE13z + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/emptydir-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CrgJhH0EID + cat /tmp/tmp.fezbJHE13z + rm /tmp/tmp.CrgJhH0EID /tmp/tmp.fezbJHE13z + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-pxc-k127.yml /tmp/tmp.KzCRvkl1op/statefulset_emptydir-pxc.yml + compare_kubectl statefulset/emptydir-proxysql + local resource=statefulset/emptydir-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql.yml + local new_result=/tmp/tmp.KzCRvkl1op/statefulset_emptydir-proxysql.yml + desc 'compare statefulset/emptydir-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/emptydir-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than 
desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/emptydir-proxysql ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. 
| select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("storage-24752", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.BT4Pqn5vPM ++ mktemp + local LAST_ERR=/tmp/tmp.7Kc4ny6cku + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/emptydir-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BT4Pqn5vPM + cat /tmp/tmp.7Kc4ny6cku + rm /tmp/tmp.BT4Pqn5vPM /tmp/tmp.7Kc4ny6cku + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_emptydir-proxysql-k127.yml /tmp/tmp.KzCRvkl1op/statefulset_emptydir-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml ++ mktemp + local LAST_OUT=/tmp/tmp.XmTTH5GVlx ++ mktemp + local LAST_ERR=/tmp/tmp.hMVpWomYdL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/emptydir.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XmTTH5GVlx perconaxtradbcluster.pxc.percona.com "emptydir" deleted from storage-24752 namespace + cat /tmp/tmp.hMVpWomYdL + rm /tmp/tmp.XmTTH5GVlx /tmp/tmp.hMVpWomYdL + return 0 + desc 'check hostpath' + set +o xtrace ----------------------------------------------------------------------------------- check hostpath ----------------------------------------------------------------------------------- + check_cr_config hostpath + local cluster=hostpath + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + spinup_pxc hostpath /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml + local cluster=hostpath + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dOnNpSKu8x ++ mktemp + local LAST_ERR=/tmp/tmp.IgL0tT24gj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dOnNpSKu8x secret/my-cluster-secrets unchanged secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.IgL0tT24gj + rm /tmp/tmp.dOnNpSKu8x /tmp/tmp.IgL0tT24gj + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_OUT=/tmp/tmp.OEgx2tddKU + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-24752~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + local LAST_ERR=/tmp/tmp.Ez8rq4vdTN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OEgx2tddKU deployment.apps/pxc-client unchanged + cat /tmp/tmp.Ez8rq4vdTN + rm /tmp/tmp.OEgx2tddKU /tmp/tmp.Ez8rq4vdTN + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + local LAST_OUT=/tmp/tmp.gqR7Kb1rX6 + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.storage-24752~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ mktemp + local LAST_ERR=/tmp/tmp.CrVqgd3Klo + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gqR7Kb1rX6 perconaxtradbcluster.pxc.percona.com/hostpath created + cat /tmp/tmp.CrVqgd3Klo + rm /tmp/tmp.gqR7Kb1rX6 /tmp/tmp.CrVqgd3Klo + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started 
----------------------------------------------------------------------------------- ++ get_proxy hostpath ++ local target_cluster=hostpath +++ kubectl_bin get pxc hostpath -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TQ1LqOX60L ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P3JTtdvYs1 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc hostpath -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TQ1LqOX60L +++ cat /tmp/tmp.P3JTtdvYs1 +++ rm /tmp/tmp.TQ1LqOX60L /tmp/tmp.P3JTtdvYs1 +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc hostpath -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JbHa4kz2i2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Vxyv1B4TXS +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc hostpath -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.JbHa4kz2i2 +++ cat /tmp/tmp.Vxyv1B4TXS +++ rm /tmp/tmp.JbHa4kz2i2 /tmp/tmp.Vxyv1B4TXS +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo hostpath-proxysql ++ return + local proxy=hostpath-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 ++ mktemp + local LAST_OUT=/tmp/tmp.QbTFEWNB7K ++ mktemp + local LAST_ERR=/tmp/tmp.NKpSq7sPlV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n storage-24752 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.QbTFEWNB7K + cat /tmp/tmp.NKpSq7sPlV error: no matching resources found + rm /tmp/tmp.QbTFEWNB7K /tmp/tmp.NKpSq7sPlV + return 1 + true + wait_for_running hostpath-proxysql 1 + local name=hostpath-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod hostpath-proxysql-0 480 + local pod=hostpath-proxysql-0 + local max_retry=480 + local ns= ++ echo hostpath-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace pod/hostpath-proxysql-0 condition met waiting for pod/hostpath-proxysql-0 to become Ready.Ok + wait_for_running hostpath-pxc 3 + local name=hostpath-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster 
----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod hostpath-pxc-0 480 + local pod=hostpath-pxc-0 + local max_retry=480 + local ns= ++ echo hostpath-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/hostpath-pxc-0 condition met waiting for pod/hostpath-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod hostpath-pxc-1 480 + local pod=hostpath-pxc-1 + local max_retry=480 + local ns= ++ echo hostpath-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/hostpath-pxc-1 condition met waiting for pod/hostpath-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod hostpath-pxc-2 480 + local pod=hostpath-pxc-2 + local max_retry=480 + local ns= ++ echo hostpath-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/hostpath-pxc-2 condition met waiting for pod/hostpath-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc hostpath -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.6v5hUYpjPj +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mov65ZTFYj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6v5hUYpjPj ++ cat /tmp/tmp.Mov65ZTFYj ++ rm /tmp/tmp.6v5hUYpjPj /tmp/tmp.Mov65ZTFYj ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h hostpath-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h hostpath-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EXxXO1boZP +++ mktemp ++ local LAST_ERR=/tmp/tmp.22yu4lsZ9d ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EXxXO1boZP ++ cat /tmp/tmp.22yu4lsZ9d ++ rm /tmp/tmp.EXxXO1boZP /tmp/tmp.22yu4lsZ9d ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o 
xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h hostpath-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h hostpath-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z79aGYi7FB +++ mktemp ++ local LAST_ERR=/tmp/tmp.oSdHuy2yc2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Z79aGYi7FB ++ cat /tmp/tmp.oSdHuy2yc2 ++ rm /tmp/tmp.Z79aGYi7FB /tmp/tmp.oSdHuy2yc2 ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h hostpath-pxc-0.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-0.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h hostpath-pxc-0.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-0.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LTRDvnCTsK +++ mktemp ++ local LAST_ERR=/tmp/tmp.PyEx5Ha4FQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LTRDvnCTsK ++ cat /tmp/tmp.PyEx5Ha4FQ ++ rm /tmp/tmp.LTRDvnCTsK /tmp/tmp.PyEx5Ha4FQ ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h hostpath-pxc-1.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-1.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h hostpath-pxc-1.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-1.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jdCTbc0RhF +++ mktemp ++ local LAST_ERR=/tmp/tmp.anRIy6IvNV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jdCTbc0RhF ++ cat /tmp/tmp.anRIy6IvNV ++ rm /tmp/tmp.jdCTbc0RhF /tmp/tmp.anRIy6IvNV ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h hostpath-pxc-2.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-2.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h hostpath-pxc-2.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h hostpath-pxc-2.hostpath-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KjK2B5Fhov +++ mktemp ++ local LAST_ERR=/tmp/tmp.PWFR5Pw3V9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KjK2B5Fhov ++ cat /tmp/tmp.PWFR5Pw3V9 ++ rm /tmp/tmp.KjK2B5Fhov /tmp/tmp.PWFR5Pw3V9 ++ return 0 + client_pod=pxc-client-59944c5bbf-fsz55 + wait_pod pxc-client-59944c5bbf-fsz55 + local pod=pxc-client-59944c5bbf-fsz55 + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-fsz55 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-fsz55 condition met waiting for pod/pxc-client-59944c5bbf-fsz55 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.KzCRvkl1op/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/select-1.sql /tmp/tmp.KzCRvkl1op/select-1.sql ++ is_keyring_plugin_in_use hostpath ++ local cluster=hostpath ++ kubectl_bin exec -it hostpath-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SK0Uz8zGMY +++ mktemp ++ local LAST_ERR=/tmp/tmp.qPvFL4z4Ye ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it hostpath-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SK0Uz8zGMY ++ cat /tmp/tmp.qPvFL4z4Ye Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.SK0Uz8zGMY /tmp/tmp.qPvFL4z4Ye ++ return 0 + '[' '' ']' + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/hostpath-pxc + local resource=statefulset/hostpath-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc.yml + local new_result=/tmp/tmp.KzCRvkl1op/statefulset_hostpath-pxc.yml + desc 'compare statefulset/hostpath-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/hostpath-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml 
']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/hostpath-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("storage-24752", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.GJf7KmjOee ++ mktemp + local LAST_ERR=/tmp/tmp.aJzpeBOntf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/hostpath-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GJf7KmjOee + cat /tmp/tmp.aJzpeBOntf + rm /tmp/tmp.GJf7KmjOee /tmp/tmp.aJzpeBOntf + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml /tmp/tmp.KzCRvkl1op/statefulset_hostpath-pxc.yml + compare_kubectl statefulset/hostpath-proxysql + local resource=statefulset/hostpath-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql.yml + local new_result=/tmp/tmp.KzCRvkl1op/statefulset_hostpath-proxysql.yml + desc 'compare statefulset/hostpath-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/hostpath-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set 
+o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/hostpath-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("storage-24752", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.GinUWUwIiR ++ mktemp + local LAST_ERR=/tmp/tmp.ZFSp7KevT8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/hostpath-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GinUWUwIiR + cat /tmp/tmp.ZFSp7KevT8 + rm /tmp/tmp.GinUWUwIiR /tmp/tmp.ZFSp7KevT8 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml /tmp/tmp.KzCRvkl1op/statefulset_hostpath-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml ++ mktemp + local LAST_OUT=/tmp/tmp.NKVrn0XNLi ++ mktemp + local LAST_ERR=/tmp/tmp.g0Bcc8QNfW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/storage/conf/hostpath.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NKVrn0XNLi perconaxtradbcluster.pxc.percona.com "hostpath" deleted from storage-24752 namespace + cat /tmp/tmp.g0Bcc8QNfW + rm /tmp/tmp.NKVrn0XNLi /tmp/tmp.g0Bcc8QNfW + return 0 + destroy storage-24752 + local namespace=storage-24752 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v 'the object has been modified' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.KzCRvkl1op/operator.log + grep -v 'get backup status: Job.batch' +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator + grep -v level=info ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.j6KILwHtJF +++ mktemp ++ local LAST_ERR=/tmp/tmp.cGFnOJcrIn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.j6KILwHtJF ++ cat /tmp/tmp.cGFnOJcrIn ++ rm /tmp/tmp.j6KILwHtJF /tmp/tmp.cGFnOJcrIn ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-859595f865-gjvj2 ++ mktemp + local LAST_OUT=/tmp/tmp.hKExq0KqVk ++ mktemp + local LAST_ERR=/tmp/tmp.ygogMbnEVd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-859595f865-gjvj2 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hKExq0KqVk + cat /tmp/tmp.ygogMbnEVd + rm /tmp/tmp.hKExq0KqVk /tmp/tmp.ygogMbnEVd + return 0 2025-10-10T10:32:55.013Z INFO setup Manager starting up {"gitCommit": "f8e092d7031c2931d0d1772ac8045385e44e3ff9", 
"gitBranch": "PR-2207-f8e092d7", "buildTime": "2025-10-10T08:21:44Z", "goVersion": "go1.24.8", "os": "linux", "arch": "amd64"} 2025-10-10T10:32:55.013Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.13-gke.1008000"} 2025-10-10T10:32:55.017Z INFO setup Registering Components. 2025-10-10T10:32:57.629Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-10-10T10:32:57.629Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-10-10T10:32:57.629Z INFO controller-runtime.metrics Starting metrics server 2025-10-10T10:32:57.629Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-10-10T10:32:57.629Z INFO controller-runtime.webhook Starting webhook server 2025-10-10T10:32:57.629Z INFO setup Starting the Cmd. 2025-10-10T10:32:57.629Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-10-10T10:32:57.630Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-10-10T10:32:57.630Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-10-10T10:32:57.729Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2025-10-10T10:33:15.053Z DEBUG events percona-xtradb-cluster-operator-859595f865-gjvj2_be8d3d24-6392-4985-bbbd-f97587619a43 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"6a2532fd-5da9-47ce-b5ba-50c3df633ec2","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1760092395040095009"}, "reason": "LeaderElection"} 2025-10-10T10:33:15.053Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-10-10T10:33:15.054Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-10-10T10:33:15.054Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-10-10T10:33:15.054Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.Secret"} 2025-10-10T10:33:15.054Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-10-10T10:33:15.077Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:15.080Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:15.154Z INFO Starting Controller {"controller": "pxc-controller"} 2025-10-10T10:33:15.154Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2025-10-10T10:33:25.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 
2025-10-10T10:33:25.060Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:35.056Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:35.059Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:45.058Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:45.061Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:47.535Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "9bfb555b-92d9-4487-8049-fae50a216c40", "user": "root"} 2025-10-10T10:33:47.614Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "9bfb555b-92d9-4487-8049-fae50a216c40", "new version": "8.0.43-34.1"} 2025-10-10T10:33:48.976Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "9bfb555b-92d9-4487-8049-fae50a216c40"} 2025-10-10T10:33:54.047Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "649a5667-ae5f-4d16-b9bd-e9b84f10d64b"} 2025-10-10T10:33:55.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:55.060Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:33:58.979Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "8a6504bc-383c-4921-ad48-c1b6a3d57a7f"} 2025-10-10T10:34:04.501Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "d041ccd2-1b56-40a4-8f51-212a49dea5b5"} 2025-10-10T10:34:05.056Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:05.059Z ERROR controller-runtime.source.Kind if kind is a CRD, it 
should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:10.067Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "1962448e-881f-4ce9-85cf-65fb0bb80f82"} 2025-10-10T10:34:15.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:15.059Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:15.449Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "320061f3-1dac-4d49-a61b-d5b556401cd5"} 2025-10-10T10:34:20.773Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "bfde9c88-1eb1-4280-a3ee-0f603f043ad8"} 2025-10-10T10:34:25.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:25.059Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:25.902Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "ef476f4a-6f09-460d-80c1-338a0df4ea89"} 2025-10-10T10:34:31.179Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "c8c10c61-2413-40e0-9fa1-61c8a635216c"} 2025-10-10T10:34:35.063Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:35.069Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:36.515Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "fbfa2082-b6fa-4afc-a69b-c63a4df85557"} 2025-10-10T10:34:41.810Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "87e38565-be6c-4cdb-b386-136861fbbb69"} 2025-10-10T10:34:45.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 
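Note: this operator-log dump is produced by the destroy() step traced above: kubectl logs is piped through the filters visible there before being written to /tmp/tmp.KzCRvkl1op/operator.log. Untangled from the interleaved trace, the chain looks roughly like this (the relative order of the grep/sed stages is not recoverable from the trace and does not affect the result; OPERATOR_POD stands in for the pod name resolved at runtime):

    kubectl logs -n pxc-operator "$OPERATOR_POD" \
        | grep -v level=info \
        | grep -v 'get backup status: Job.batch' \
        | grep -v 'the object has been modified' \
        | /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
        | sort -u \
        | tee /tmp/tmp.KzCRvkl1op/operator.log

Because of sort -u, the records print deduplicated and in lexicographic (i.e. timestamp) order, which is also why the Go stack-trace frames just below appear grouped by file path rather than in call order.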
2025-10-10T10:34:45.060Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:47.055Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "c4640da1-966c-4f27-925a-2d3c653e8c06"} 2025-10-10T10:34:52.388Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "514b03d0-449a-4fdd-9167-dbe8fa64507e"} 2025-10-10T10:34:55.057Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterBackup.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterBackup\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:55.059Z ERROR controller-runtime.source.Kind if kind is a CRD, it should be installed before calling Start {"kind": "PerconaXtraDBClusterRestore.pxc.percona.com", "error": "no matches for kind \"PerconaXtraDBClusterRestore\" in version \"pxc.percona.com/v1\""} 2025-10-10T10:34:57.563Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "storage-24752", "name": "hostpath", "reconcileID": "86ea6a4c-ff5d-4e77-82e1-9fa952498fe5"} /go/pkg/mod/k8s.io/apimachinery@v0.34.1/pkg/util/wait/loop.go:53 /go/pkg/mod/k8s.io/apimachinery@v0.34.1/pkg/util/wait/loop.go:54 /go/pkg/mod/k8s.io/apimachinery@v0.34.1/pkg/util/wait/loop.go:87 /go/pkg/mod/k8s.io/apimachinery@v0.34.1/pkg/util/wait/loop.go:88 /go/pkg/mod/k8s.io/apimachinery@v0.34.1/pkg/util/wait/poll.go:33 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.1/pkg/internal/source/kind.go:68 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.1/pkg/internal/source/kind.go:75 k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func1 k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2 k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel sigs.k8s.io/controller-runtime/pkg/internal/source.(*Kind[...]).Start.func1 sigs.k8s.io/controller-runtime/pkg/internal/source.(*Kind[...]).Start.func1.1 + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.FGns5qRNFj ++ mktemp + local LAST_ERR=/tmp/tmp.nVh5xJrW0X + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FGns5qRNFj No resources found + cat /tmp/tmp.nVh5xJrW0X + rm /tmp/tmp.FGns5qRNFj /tmp/tmp.nVh5xJrW0X + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.Evaitcbji8 ++ mktemp + local LAST_ERR=/tmp/tmp.UOp208sw8G + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 
']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.Evaitcbji8 + cat /tmp/tmp.UOp208sw8G error: the server doesn't have a resource type "pxc-backup" + rm /tmp/tmp.Evaitcbji8 /tmp/tmp.UOp208sw8G + return 1 + : + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.WLkd6EVDTv ++ mktemp + local LAST_ERR=/tmp/tmp.hnB9cxKkEN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.WLkd6EVDTv + cat /tmp/tmp.hnB9cxKkEN error: the server doesn't have a resource type "pxc-restore" + rm /tmp/tmp.WLkd6EVDTv /tmp/tmp.hnB9cxKkEN + return 1 + : + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.qFdIny91BF ++ mktemp + local LAST_ERR=/tmp/tmp.8tMhsaeUpS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qFdIny91BF validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.8tMhsaeUpS + rm /tmp/tmp.qFdIny91BF /tmp/tmp.8tMhsaeUpS + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.KzCRvkl1op + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator + kubectl_bin delete --grace-period=0 --force=true namespace storage-24752 ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.e99yfk0un6 + local LAST_OUT=/tmp/tmp.4poRLatU6u ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.pgtB1YuZbJ + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.qlrejQnfbm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace storage-24752
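Note: both create_infra and destroy clear finalizers from any leftover pxc objects before deleting them. With no resources found, xargs -L 1 still spawns its command once on empty input, which is where the harmless "kubectl patch pxc -n sh ... error: resource(s) were provided, but no name was specified" above comes from ($0 of the spawned shell defaults to "sh" when no arguments follow the command string). With GNU xargs, adding -r (--no-run-if-empty) would avoid the no-op invocation; a sketch, not the script's actual code:

    kubectl get pxc --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'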
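Note: nearly every kubectl call in this log is bracketed by the same retry scaffolding (mktemp for LAST_OUT/LAST_ERR, seq 0 2, set +e/set -e, cat, rm). Reconstructed from the trace, the helper behaves roughly like the sketch below; only behavior visible in the log is assumed, and the extra '[' 1 == 1 ']' test seen between failed attempts is simplified away:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        exit_status=0
        for i in $(seq 0 2); do                      # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" != 0 ] || break         # success: stop retrying
            sleep 0                                  # the trace shows no real back-off
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"                        # 0 on success, last failure code otherwise
    }

This matches the observable pattern: three attempts, both temp files dumped on exit, and `return 1` only after all retries fail (as with the pxc-backup/pxc-restore deletions above).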