Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/logs/pitr-gap-errors-8-0.log
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ GTID_PATTERN='[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}:[0-9]+'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ main
+ create_infra pitr-gap-errors-3874
+ local ns=pitr-gap-errors-3874
+ '[' -n pxc-operator ']'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n pitr-gap-errors-30959 pitr-gap-errors --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/pitr-gap-errors patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.lZ0gTJPkCZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.rqgiBwHMct
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.lZ0gTJPkCZ
perconaxtradbcluster.pxc.percona.com "pitr-gap-errors" deleted from pitr-gap-errors-30959 namespace
+ cat /tmp/tmp.rqgiBwHMct
+ rm /tmp/tmp.lZ0gTJPkCZ /tmp/tmp.rqgiBwHMct
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.f1rl4D6zA4
++ mktemp
+ local LAST_ERR=/tmp/tmp.XTy7aOcMV8
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.f1rl4D6zA4
+ cat /tmp/tmp.XTy7aOcMV8
error: the server doesn't have a resource type "pxc-backup"
+ rm /tmp/tmp.f1rl4D6zA4 /tmp/tmp.XTy7aOcMV8
+ return 1
+ :
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.S24vxqOmGH
++ mktemp
+ local LAST_ERR=/tmp/tmp.czpKN8LZLH
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.S24vxqOmGH
+ cat /tmp/tmp.czpKN8LZLH
error: the server doesn't have a resource type "pxc-restore"
+ rm /tmp/tmp.S24vxqOmGH /tmp/tmp.czpKN8LZLH
+ return 1
+ :
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
++ tail -n1
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were
provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.Rpw1XAvM5i ++ mktemp + local LAST_ERR=/tmp/tmp.afEvWaozyG + local exit_status=0 ++ seq 0 2 ++ mktemp + local LAST_OUT=/tmp/tmp.SMZRQoQh1R ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.g6ZIfe6N78 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Rpw1XAvM5i + cat /tmp/tmp.afEvWaozyG + rm /tmp/tmp.Rpw1XAvM5i /tmp/tmp.afEvWaozyG + return 0 namespace "cert-manager" deleted namespace "pitr-gap-errors-30959" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SMZRQoQh1R namespace "pxc-operator" deleted + cat /tmp/tmp.g6ZIfe6N78 + rm /tmp/tmp.SMZRQoQh1R /tmp/tmp.g6ZIfe6N78 + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.zXXyK99eGu ++ mktemp + local LAST_ERR=/tmp/tmp.4k3aP3BI90 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zXXyK99eGu namespace/pxc-operator created + cat /tmp/tmp.4k3aP3BI90 + rm /tmp/tmp.zXXyK99eGu /tmp/tmp.4k3aP3BI90 + return 0 ++ kubectl_bin config 
current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.YMzrAIKEPq +++ mktemp ++ local LAST_ERR=/tmp/tmp.HPLCvVGLUb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YMzrAIKEPq ++ cat /tmp/tmp.HPLCvVGLUb ++ rm /tmp/tmp.YMzrAIKEPq /tmp/tmp.HPLCvVGLUb ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.INCz7JzZt5 ++ mktemp + local LAST_ERR=/tmp/tmp.I7DpwOSlUW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.INCz7JzZt5 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2" modified. + cat /tmp/tmp.I7DpwOSlUW + rm /tmp/tmp.INCz7JzZt5 /tmp/tmp.I7DpwOSlUW + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.7aIfABkKrA ++ mktemp + local LAST_ERR=/tmp/tmp.GlB0Mcq1Er + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7aIfABkKrA customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.GlB0Mcq1Er + rm /tmp/tmp.7aIfABkKrA /tmp/tmp.GlB0Mcq1Er + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.gZr3C6alQd ++ mktemp + local LAST_ERR=/tmp/tmp.oKSLJdAGD0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gZr3C6alQd clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.oKSLJdAGD0 + rm /tmp/tmp.gZr3C6alQd /tmp/tmp.oKSLJdAGD0 + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/cw-operator.yaml + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - ++ mktemp + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + local LAST_OUT=/tmp/tmp.E5whPeUi1W ++ mktemp + local LAST_ERR=/tmp/tmp.YUvXMAd1n6 + local exit_status=0 ++ seq 0 2 + for 
i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.E5whPeUi1W deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.YUvXMAd1n6 + rm /tmp/tmp.E5whPeUi1W /tmp/tmp.YUvXMAd1n6 + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.AkcXaCPARm ++ mktemp + local LAST_ERR=/tmp/tmp.4dGh43c10z + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AkcXaCPARm pod/percona-xtradb-cluster-operator-859595f865-w72jn condition met + cat /tmp/tmp.4dGh43c10z + rm /tmp/tmp.AkcXaCPARm /tmp/tmp.4dGh43c10z + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.lRpn0joCHs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RitCsuKHhH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lRpn0joCHs ++ cat /tmp/tmp.RitCsuKHhH ++ rm /tmp/tmp.lRpn0joCHs /tmp/tmp.RitCsuKHhH ++ return 0 + wait_pod percona-xtradb-cluster-operator-859595f865-w72jn 480 pxc-operator + local pod=percona-xtradb-cluster-operator-859595f865-w72jn + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-859595f865-w72jn ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-859595f865-w72jn condition met waiting for pod/percona-xtradb-cluster-operator-859595f865-w72jn to become Ready.Ok + sleep 3 + create_namespace pitr-gap-errors-3874 + local namespace=pitr-gap-errors-3874 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl api-resources ++ kubectl get crd ++ grep 
chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-gap-errors-3874' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-gap-errors-3874 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-gap-errors-3874 + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.rpBK7fhndk ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.4QLavA2Zbg + local exit_status=0 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.FAAamK7A4F ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.3b1Fh4o8qn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pitr-gap-errors-3874 + xargs kubectl delete ns + awk '{print$1}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pitr-gap-errors-3874 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rpBK7fhndk + cat /tmp/tmp.4QLavA2Zbg + rm /tmp/tmp.rpBK7fhndk /tmp/tmp.4QLavA2Zbg + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pitr-gap-errors-3874 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.FAAamK7A4F + cat /tmp/tmp.3b1Fh4o8qn Error from server (NotFound): namespaces "pitr-gap-errors-3874" not found + rm /tmp/tmp.FAAamK7A4F /tmp/tmp.3b1Fh4o8qn + return 1 + : + wait_for_delete namespace/pitr-gap-errors-3874 + local res=namespace/pitr-gap-errors-3874 + echo -n 'waiting for namespace/pitr-gap-errors-3874 to be deleted' waiting for namespace/pitr-gap-errors-3874 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pitr-gap-errors-3874" not found + desc 'create namespace pitr-gap-errors-3874' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-gap-errors-3874 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-gap-errors-3874 ++ mktemp + local LAST_OUT=/tmp/tmp.5zBos8YZXJ ++ mktemp + local LAST_ERR=/tmp/tmp.3rM1sWT5Vd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pitr-gap-errors-3874 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5zBos8YZXJ namespace/pitr-gap-errors-3874 created + cat /tmp/tmp.3rM1sWT5Vd + rm /tmp/tmp.5zBos8YZXJ /tmp/tmp.3rM1sWT5Vd + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.8JMkjiWOWw +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.A1GvUxRqET ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8JMkjiWOWw ++ cat /tmp/tmp.A1GvUxRqET ++ rm /tmp/tmp.8JMkjiWOWw /tmp/tmp.A1GvUxRqET ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2 --namespace=pitr-gap-errors-3874 ++ mktemp + local LAST_OUT=/tmp/tmp.AHN8Aip7wT ++ mktemp + local LAST_ERR=/tmp/tmp.RFbJ6KH8xc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2 --namespace=pitr-gap-errors-3874 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AHN8Aip7wT Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster2" modified. + cat /tmp/tmp.RFbJ6KH8xc + rm /tmp/tmp.AHN8Aip7wT /tmp/tmp.RFbJ6KH8xc + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.iWsoNd2mUm ++ mktemp + local LAST_ERR=/tmp/tmp.lhu2McOeZy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iWsoNd2mUm secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.lhu2McOeZy + rm /tmp/tmp.iWsoNd2mUm /tmp/tmp.lhu2McOeZy + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.aazmZ1r5Ca ++ mktemp + local LAST_ERR=/tmp/tmp.sBiSeDGaDT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.aazmZ1r5Ca namespace/cert-manager created + cat /tmp/tmp.sBiSeDGaDT + rm /tmp/tmp.aazmZ1r5Ca /tmp/tmp.sBiSeDGaDT + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.0onKHvFoan ++ mktemp + local LAST_ERR=/tmp/tmp.zZVTBO8z0P + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0onKHvFoan namespace/cert-manager labeled + cat /tmp/tmp.zZVTBO8z0P + rm /tmp/tmp.0onKHvFoan /tmp/tmp.zZVTBO8z0P + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.JoUKTcodUb ++ mktemp + local LAST_ERR=/tmp/tmp.WQrWVSifJY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 
0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JoUKTcodUb namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created 
service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.WQrWVSifJY Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.JoUKTcodUb /tmp/tmp.WQrWVSifJY + return 0 + '[' '' == 4.10 ']' + sleep 70 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.N4y5qEDvO4 ++ mktemp + local LAST_ERR=/tmp/tmp.l0r6xI4ROJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.N4y5qEDvO4 issuer.cert-manager.io/selfsigning-issuer created + cat /tmp/tmp.l0r6xI4ROJ + rm /tmp/tmp.N4y5qEDvO4 /tmp/tmp.l0r6xI4ROJ + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DkkwIWYvnr ++ mktemp + local LAST_ERR=/tmp/tmp.DdddDuIrYF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DkkwIWYvnr certificate.cert-manager.io/tls-minio created + cat /tmp/tmp.DdddDuIrYF Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. + rm /tmp/tmp.DkkwIWYvnr /tmp/tmp.DdddDuIrYF + return 0 + sleep 25 + start_minio tls-minio + deploy_helm pitr-gap-errors-3874 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "chaos-mesh" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret=tls-minio + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n tls-minio ]] + endpoint=https://minio-service:9000 + minio_args+=(--set tls.enabled=true --set tls.certSecret="$cert_secret" --set tls.publicCrt=tls.crt --set tls.privateKey=tls.key) + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set tls.enabled=true --set tls.certSecret=tls-minio --set tls.publicCrt=tls.crt --set tls.privateKey=tls.key minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set tls.enabled=true --set tls.certSecret=tls-minio --set tls.publicCrt=tls.crt --set tls.privateKey=tls.key minio/minio NAME: minio-service LAST DEPLOYED: Fri Oct 10 09:28:01 2025 NAMESPACE: pitr-gap-errors-3874 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-gap-errors-3874.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-gap-errors-3874 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-gap-errors-3874 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-gap-errors-3874 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-gap-errors-3874 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UdS4ke9Xxx +++ mktemp ++ local LAST_ERR=/tmp/tmp.L62Y2Ip5Qp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UdS4ke9Xxx ++ cat /tmp/tmp.L62Y2Ip5Qp ++ rm /tmp/tmp.UdS4ke9Xxx /tmp/tmp.L62Y2Ip5Qp ++ return 0 + MINIO_POD=minio-service-f887c755-n72xf + wait_pod minio-service-f887c755-n72xf + local pod=minio-service-f887c755-n72xf + local max_retry=480 + local ns= ++ echo minio-service-f887c755-n72xf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/minio-service-f887c755-n72xf condition met waiting for pod/minio-service-f887c755-n72xf to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url https://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.czLsysZDIc ++ mktemp + local LAST_ERR=/tmp/tmp.3ETz10TLOj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url https://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.czLsysZDIc /usr/lib/python2.7/site-packages/botocore/vendored/requests/packages/urllib3/connectionpool.py:768: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. 
See: https://urllib3.readthedocs.org/en/latest/security.html InsecureRequestWarning) make_bucket: operator-testing pod "aws-cli" deleted from pitr-gap-errors-3874 namespace + cat /tmp/tmp.3ETz10TLOj + rm /tmp/tmp.czLsysZDIc /tmp/tmp.3ETz10TLOj + return 0 + cluster=pitr-gap-errors + spinup_pxc pitr-gap-errors /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/pitr-gap-errors.yml + local cluster=pitr-gap-errors + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/pitr-gap-errors.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.f9VacppiOg ++ mktemp + local LAST_ERR=/tmp/tmp.VdjaQeiioy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.f9VacppiOg secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.VdjaQeiioy + rm /tmp/tmp.f9VacppiOg /tmp/tmp.VdjaQeiioy + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.7zvTwV6Nwx + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.pitr-gap-errors-3874~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + local LAST_ERR=/tmp/tmp.5mNI4MmWqw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7zvTwV6Nwx deployment.apps/pxc-client created + cat /tmp/tmp.5mNI4MmWqw + rm /tmp/tmp.7zvTwV6Nwx /tmp/tmp.5mNI4MmWqw + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/pitr-gap-errors.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/pitr-gap-errors.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/pitr-gap-errors.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.OyoNoUXSDk + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.pitr-gap-errors-3874~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.Y2IRrt3ttu + local exit_status=0 + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OyoNoUXSDk perconaxtradbcluster.pxc.percona.com/pitr-gap-errors created + cat /tmp/tmp.Y2IRrt3ttu + rm /tmp/tmp.OyoNoUXSDk /tmp/tmp.Y2IRrt3ttu + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy pitr-gap-errors ++ local target_cluster=pitr-gap-errors +++ kubectl_bin get pxc pitr-gap-errors -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dQz9ygYbyt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZYgJrDYsxc +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc pitr-gap-errors -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.dQz9ygYbyt +++ cat /tmp/tmp.ZYgJrDYsxc +++ rm /tmp/tmp.dQz9ygYbyt /tmp/tmp.ZYgJrDYsxc +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc pitr-gap-errors -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LXH1bxBMq6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nEyhKIgnQE +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc pitr-gap-errors -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.LXH1bxBMq6 +++ cat /tmp/tmp.nEyhKIgnQE +++ rm /tmp/tmp.LXH1bxBMq6 /tmp/tmp.nEyhKIgnQE +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo pitr-gap-errors-proxysql ++ return + local proxy=pitr-gap-errors-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n pitr-gap-errors-3874 ++ mktemp + local LAST_OUT=/tmp/tmp.k55mjalZEZ ++ mktemp + local 
LAST_ERR=/tmp/tmp.bLuEXnT1sk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n pitr-gap-errors-3874 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n pitr-gap-errors-3874 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n pitr-gap-errors-3874 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.k55mjalZEZ + cat /tmp/tmp.bLuEXnT1sk error: no matching resources found + rm /tmp/tmp.k55mjalZEZ /tmp/tmp.bLuEXnT1sk + return 1 + true + wait_for_running pitr-gap-errors-proxysql 1 + local name=pitr-gap-errors-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod pitr-gap-errors-proxysql-0 480 + local pod=pitr-gap-errors-proxysql-0 + local max_retry=480 + local ns= ++ echo pitr-gap-errors-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace Error from server (NotFound): pods "pitr-gap-errors-proxysql-0" not found waiting for pod/pitr-gap-errors-proxysql-0 to become Ready..........Ok + wait_for_running pitr-gap-errors-pxc 3 + local name=pitr-gap-errors-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod pitr-gap-errors-pxc-0 480 + local pod=pitr-gap-errors-pxc-0 + local max_retry=480 + local ns= ++ echo pitr-gap-errors-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/pitr-gap-errors-pxc-0 condition met waiting for pod/pitr-gap-errors-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod pitr-gap-errors-pxc-1 480 + local pod=pitr-gap-errors-pxc-1 + local max_retry=480 + local ns= ++ echo pitr-gap-errors-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/pitr-gap-errors-pxc-1 condition met waiting for pod/pitr-gap-errors-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod pitr-gap-errors-pxc-2 480 + local pod=pitr-gap-errors-pxc-2 + local max_retry=480 + local ns= ++ echo pitr-gap-errors-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/pitr-gap-errors-pxc-2 condition met waiting for pod/pitr-gap-errors-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc pitr-gap-errors -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData 
my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.UvgTjQZ5E9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iuJC5TuGNQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UvgTjQZ5E9 ++ cat /tmp/tmp.iuJC5TuGNQ ++ rm /tmp/tmp.UvgTjQZ5E9 /tmp/tmp.iuJC5TuGNQ ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h pitr-gap-errors-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h pitr-gap-errors-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jIDJEvbPnG +++ mktemp ++ local LAST_ERR=/tmp/tmp.nxtSrDqqBB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jIDJEvbPnG ++ cat /tmp/tmp.nxtSrDqqBB ++ rm /tmp/tmp.jIDJEvbPnG /tmp/tmp.nxtSrDqqBB ++ return 0 + client_pod=pxc-client-59944c5bbf-l7wvm + wait_pod pxc-client-59944c5bbf-l7wvm + local pod=pxc-client-59944c5bbf-l7wvm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-l7wvm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-l7wvm condition met waiting for pod/pxc-client-59944c5bbf-l7wvm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h pitr-gap-errors-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h pitr-gap-errors-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EeQQFYLANJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CKMJYyffEw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EeQQFYLANJ ++ cat /tmp/tmp.CKMJYyffEw ++ rm /tmp/tmp.EeQQFYLANJ /tmp/tmp.CKMJYyffEw ++ return 0 + client_pod=pxc-client-59944c5bbf-l7wvm + wait_pod pxc-client-59944c5bbf-l7wvm + local pod=pxc-client-59944c5bbf-l7wvm + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-59944c5bbf-l7wvm ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-l7wvm condition met waiting for pod/pxc-client-59944c5bbf-l7wvm to become ReadyDefaulted container "pxc-client" out of: 
pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-0.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-0.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-0.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-0.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.svK9utkP7P +++ mktemp ++ local LAST_ERR=/tmp/tmp.dtAhay4Uyx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.svK9utkP7P ++ cat /tmp/tmp.dtAhay4Uyx ++ rm /tmp/tmp.svK9utkP7P /tmp/tmp.dtAhay4Uyx ++ return 0 + client_pod=pxc-client-59944c5bbf-l7wvm + wait_pod pxc-client-59944c5bbf-l7wvm + local pod=pxc-client-59944c5bbf-l7wvm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-l7wvm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-l7wvm condition met waiting for pod/pxc-client-59944c5bbf-l7wvm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.ZhrXb27gRy/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql /tmp/tmp.ZhrXb27gRy/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-1.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-1.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-1.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-1.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iSz5K4rqfN +++ mktemp ++ local LAST_ERR=/tmp/tmp.SlSaofBriD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iSz5K4rqfN ++ cat /tmp/tmp.SlSaofBriD ++ rm /tmp/tmp.iSz5K4rqfN /tmp/tmp.SlSaofBriD ++ return 0 + client_pod=pxc-client-59944c5bbf-l7wvm + wait_pod pxc-client-59944c5bbf-l7wvm + local pod=pxc-client-59944c5bbf-l7wvm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-l7wvm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-l7wvm condition met waiting for pod/pxc-client-59944c5bbf-l7wvm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.ZhrXb27gRy/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql /tmp/tmp.ZhrXb27gRy/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-2.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-2.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h pitr-gap-errors-pxc-2.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h pitr-gap-errors-pxc-2.pitr-gap-errors-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vwrom08tki +++ mktemp ++ local LAST_ERR=/tmp/tmp.eQJucPmaxv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Vwrom08tki ++ cat /tmp/tmp.eQJucPmaxv ++ rm /tmp/tmp.Vwrom08tki /tmp/tmp.eQJucPmaxv ++ return 0 + client_pod=pxc-client-59944c5bbf-l7wvm + wait_pod pxc-client-59944c5bbf-l7wvm + local pod=pxc-client-59944c5bbf-l7wvm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-l7wvm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-l7wvm condition met waiting for pod/pxc-client-59944c5bbf-l7wvm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.ZhrXb27gRy/select-1.sql ']'
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/compare/select-1.sql /tmp/tmp.ZhrXb27gRy/select-1.sql
++ is_keyring_plugin_in_use pitr-gap-errors
++ local cluster=pitr-gap-errors
++ kubectl_bin exec -it pitr-gap-errors-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
++ egrep -o 'early-plugin-load=keyring_\w+.so'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gpLPszn5Nn
+++ mktemp
++ local LAST_ERR=/tmp/tmp.xoGYw7vuKB
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec -it pitr-gap-errors-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.gpLPszn5Nn
++ cat /tmp/tmp.xoGYw7vuKB
Unable to use a TTY - input is not a terminal or the right kind of file
++ rm /tmp/tmp.gpLPszn5Nn /tmp/tmp.xoGYw7vuKB
++ return 0
+ '[' '' ']'
+ run_backup pitr-gap-errors on-pitr-minio
+ local cluster=pitr-gap-errors
+ local backup=on-pitr-minio
+ log 'run pxc-backup/on-pitr-minio'
++ date +%Y-%m-%dT%H:%M:%S%z
+ echo '[2025-10-10T09:34:14+0000]' run pxc-backup/on-pitr-minio
[2025-10-10T09:34:14+0000] run pxc-backup/on-pitr-minio
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/on-pitr-minio.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.1YsbyurYSS
++ mktemp
+ local LAST_ERR=/tmp/tmp.7TG1sXwZ1Y
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/on-pitr-minio.yml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/on-pitr-minio.yml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/on-pitr-minio.yml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.1YsbyurYSS
+ cat /tmp/tmp.7TG1sXwZ1Y
error: resource mapping not found for name: "on-pitr-minio" namespace: "" from "/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/pitr-gap-errors/conf/on-pitr-minio.yml": no matches for kind "PerconaXtraDBClusterBackup" in version "pxc.percona.com/v1"
ensure CRDs are installed first
+ rm /tmp/tmp.1YsbyurYSS /tmp/tmp.7TG1sXwZ1Y
+ return 1
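
The run aborts here, and the failure is consistent with the rest of the log: during deploy_operator, the server-side apply of deploy/crd.yaml reported only perconaxtradbclusters.pxc.percona.com as applied, so the PerconaXtraDBClusterBackup kind (and, judging by the earlier cleanup errors, PerconaXtraDBClusterRestore as well) was never registered with the API server. The same root cause already surfaced in create_infra as 'the server doesn't have a resource type "pxc-backup"' / '"pxc-restore"'. A quick way to confirm with plain kubectl (standard commands; the expected backup/restore CRD names are an assumption based on the pxc.percona.com group seen in the log):

    # list the CRDs in the pxc.percona.com group; a healthy install should also
    # show backup and restore CRDs alongside perconaxtradbclusters (assumed names)
    kubectl get crd | grep pxc.percona.com
    # or ask the API server which kinds the group actually serves
    kubectl api-resources --api-group=pxc.percona.com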
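
Most of the trace volume comes from the harness's kubectl_bin wrapper: it captures stdout and stderr of a kubectl invocation into mktemp files, retries up to three times (seq 0 2) with set +e around the call, then cats both capture files, removes them, and returns the last exit status. A minimal sketch reconstructed from the trace; the variable names and control flow are taken from the log, the exact body (including the '[' 1 == 1 ']' guard between attempts) is an assumption:

    kubectl_bin() {
        local LAST_OUT
        local LAST_ERR
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        local exit_status=0
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ $exit_status != 0 ]; then
                sleep 0        # the trace shows a zero-second pause between attempts
            else
                break          # success: stop retrying
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"        # the trace cats the error capture as well
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }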
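
start_minio drives helm through a separate retry helper ('retry 10 60 helm install ...'); the trace shows it setting max=10, delay=60, shift 2, and n=1 before the first attempt. A sketch under those names; since the install succeeded on the first try, everything past the first attempt is an assumption:

    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                return 1       # give up after $max attempts (assumed behavior)
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }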
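
apply_config / cat_config pin every image field in a test manifest to the PR build by piping the file through a chain of sed substitutions before kubectl apply -f -. A condensed sketch using the substitutions visible in the trace (their ordering in the real harness may differ, and $config / $namespace stand in for the function's arguments):

    cat "$config" \
        | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
        | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7#' \
        | /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' \
        | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
        | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | /usr/bin/sed -e "s~minio-service.#namespace~minio-service.$namespace~" \
        | kubectl apply -f -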
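
GTID_PATTERN, defined at the very top of the run, is an extended regex for a MySQL GTID (a source UUID followed by a transaction number). It is never exercised in the portion of the log shown; a hypothetical usage, with an invented GTID value for illustration:

    # hypothetical GTID value matching the pattern "uuid:transaction-id"
    gtid='f8e092d7-1111-2222-3333-444455556666:42'
    echo "$gtid" | grep -E "$GTID_PATTERN"   # prints the line when it matches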