Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/logs/scheduled-backup-8-0.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ main
+ create_infra scheduled-backup-26149
+ local ns=scheduled-backup-26149
+ '[' -n pxc-operator ']'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n scheduled-backup-22712 scheduled-backup --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/scheduled-backup patched (no change)
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.eU7rDVzg20
++ mktemp
+ local LAST_ERR=/tmp/tmp.WmrnTxaEbo
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.eU7rDVzg20
perconaxtradbcluster.pxc.percona.com "scheduled-backup" deleted from scheduled-backup-22712 namespace
+ cat /tmp/tmp.WmrnTxaEbo
+ rm /tmp/tmp.eU7rDVzg20 /tmp/tmp.WmrnTxaEbo
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.wCR2N9s4V8
++ mktemp
+ local LAST_ERR=/tmp/tmp.CRTTMjWQPF
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.wCR2N9s4V8
perconaxtradbclusterbackup.pxc.percona.com "cron-scheduled-backup-pvc-2026494548-q6fav" deleted from scheduled-backup-22712 namespace
+ cat /tmp/tmp.CRTTMjWQPF
+ rm /tmp/tmp.wCR2N9s4V8 /tmp/tmp.CRTTMjWQPF
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.DsNVMF1RlY
++ mktemp
+ local LAST_ERR=/tmp/tmp.n7yfyZJVR7
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.DsNVMF1RlY
No resources found
+ cat /tmp/tmp.n7yfyZJVR7
+ rm /tmp/tmp.DsNVMF1RlY /tmp/tmp.n7yfyZJVR7
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
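Every destroy_chaos_mesh step above follows the same idiom: list a resource kind, grep for the chaos-mesh name, and hand whatever matched to `timeout 30 kubectl delete`. When grep matches nothing, kubectl fails with "resource(s) were provided, but no name was specified" and the trailing `+ :` swallows the error. A minimal sketch of that idiom as a helper; the function name `delete_matching` is hypothetical, not from the harness:

    delete_matching() {
        local kind=$1 pattern=$2
        # kubectl delete errors out on an empty name list; "|| :" keeps set -e happy
        timeout 30 kubectl delete "$kind" $(kubectl get "$kind" | grep "$pattern" | awk '{print $1}') || :
    }
    delete_matching MutatingWebhookConfiguration chaos-mesh
    delete_matching ValidatingWebhookConfiguration chaos-mesh
    delete_matching crd chaos-mesh.org
    delete_matching clusterrolebinding chaos-mesh
    delete_matching clusterrole chaos-mesh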
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
+ awk '{print$1}'
+ kubectl_bin delete namespace pxc-operator
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.DiOISV3qLw
+ local LAST_OUT=/tmp/tmp.Y5IPpH2zbn
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.9ya4sbRezC
+ local exit_status=0
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.oNVTzUCTw9
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.DiOISV3qLw
+ cat /tmp/tmp.9ya4sbRezC
+ rm /tmp/tmp.DiOISV3qLw /tmp/tmp.9ya4sbRezC
+ return 0
namespace "scheduled-backup-22712" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Y5IPpH2zbn
namespace "pxc-operator" deleted
+ cat /tmp/tmp.oNVTzUCTw9
+ rm /tmp/tmp.Y5IPpH2zbn /tmp/tmp.oNVTzUCTw9
+ return 0
+ wait_for_delete namespace/pxc-operator
+ local res=namespace/pxc-operator
+ echo -n 'waiting for namespace/pxc-operator to be deleted'
waiting for namespace/pxc-operator to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "pxc-operator" not found
+ desc 'create namespace pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.0Q8QNAAO0E
++ mktemp
+ local LAST_ERR=/tmp/tmp.WLIqHgHArO
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.0Q8QNAAO0E
namespace/pxc-operator created
+ cat /tmp/tmp.WLIqHgHArO
+ rm /tmp/tmp.0Q8QNAAO0E /tmp/tmp.WLIqHgHArO
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.7vo5bJsUfm
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PC7mLbdiHT
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.7vo5bJsUfm
++ cat /tmp/tmp.PC7mLbdiHT
++ rm /tmp/tmp.7vo5bJsUfm /tmp/tmp.PC7mLbdiHT
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2 --namespace=pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.LkfU2NZExD
++ mktemp
+ local LAST_ERR=/tmp/tmp.JEi4Sagtvg
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2 --namespace=pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.LkfU2NZExD
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2" modified.
+ cat /tmp/tmp.JEi4Sagtvg
+ rm /tmp/tmp.LkfU2NZExD /tmp/tmp.JEi4Sagtvg
+ return 0
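Nearly every kubectl call in this log runs through the harness's `kubectl_bin` wrapper, whose shape can be read off the trace: capture stdout and stderr to mktemp files, try up to three times (`seq 0 2`), then replay the captured streams and clean up. An approximate reconstruction from the trace alone; the real function in the e2e harness likely differs in detail (for instance, the extra `'[' 1 == 1 ']'` test visible on failures suggests a guard not modeled here):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0    # the trace shows a zero-second pause between attempts
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }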
+ deploy_operator
+ desc 'start PXC operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.qktsbNMLDJ
++ mktemp
+ local LAST_ERR=/tmp/tmp.60PGa6L4NR
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.qktsbNMLDJ
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
+ cat /tmp/tmp.60PGa6L4NR
+ rm /tmp/tmp.qktsbNMLDJ /tmp/tmp.60PGa6L4NR
+ return 0
+ '[' -n pxc-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=pxc-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: pxc-operator^'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.6wacRm5bK8
++ mktemp
+ local LAST_ERR=/tmp/tmp.3ufDRrqehD
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6wacRm5bK8
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
+ cat /tmp/tmp.3ufDRrqehD
+ rm /tmp/tmp.6wacRm5bK8 /tmp/tmp.3ufDRrqehD
+ return 0
+ kubectl_bin apply -f -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' -
+ sed -e 's^failureThreshold: .*^failureThreshold: 10^'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/deploy/cw-operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.9skEtV9apJ
++ mktemp
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' -
+ local LAST_ERR=/tmp/tmp.eGZKpYh9pK
+ local exit_status=0
+ sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2434-3b65d7fe^'
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.9skEtV9apJ
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
+ cat /tmp/tmp.eGZKpYh9pK
+ rm /tmp/tmp.9skEtV9apJ /tmp/tmp.eGZKpYh9pK
+ return 0
+ sleep 10
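The operator Deployment just applied is not used verbatim: deploy_operator pipes deploy/cw-operator.yaml through yq to pin LOG_LEVEL, DISABLE_TELEMETRY, and PXCO_FEATURE_GATES, and through sed to swap in the PR image and a higher failureThreshold. Condensed into one pipeline (expressions copied from the trace; the stage ordering is a guess, since xtrace interleaves the pipeline members):

    cat deploy/cw-operator.yaml \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - \
        | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
        | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2434-3b65d7fe^' \
        | kubectl apply -f -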
+ kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
++ mktemp
+ local LAST_OUT=/tmp/tmp.uW8ag0bzOO
++ mktemp
+ local LAST_ERR=/tmp/tmp.12mxqLwO3x
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.uW8ag0bzOO
pod/percona-xtradb-cluster-operator-665ff7485-tl4t9 condition met
+ cat /tmp/tmp.12mxqLwO3x
+ rm /tmp/tmp.uW8ag0bzOO /tmp/tmp.12mxqLwO3x
+ return 0
++ get_operator_pod
++ local label_prefix=app.kubernetes.io/
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
+++ grep -c percona-xtradb-cluster-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name'
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
++ head -1
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xckyZSKVWF
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ULLXzmblpE
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.xckyZSKVWF
++ cat /tmp/tmp.ULLXzmblpE
++ rm /tmp/tmp.xckyZSKVWF /tmp/tmp.ULLXzmblpE
++ return 0
+ wait_pod percona-xtradb-cluster-operator-665ff7485-tl4t9 480 pxc-operator
+ local pod=percona-xtradb-cluster-operator-665ff7485-tl4t9
+ local max_retry=480
+ local ns=pxc-operator
++ echo percona-xtradb-cluster-operator-665ff7485-tl4t9
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/percona-xtradb-cluster-operator-665ff7485-tl4t9 condition met
waiting for pod/percona-xtradb-cluster-operator-665ff7485-tl4t9 to become Ready.Ok
+ sleep 3
+ create_namespace scheduled-backup-26149
+ local namespace=scheduled-backup-26149
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get MutatingWebhookConfiguration
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrole
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
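Earlier in this block, get_operator_pod resolved the operator pod name. The lookup reduces to: take Running pods with the operator label, drop any that are already terminating, and keep the first name (assembled from the commands visible in the trace):

    kubectl get pods \
        --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
        --field-selector=status.phase=Running -o json -n pxc-operator \
      | jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' \
      | head -1

The deletionTimestamp filter matters during redeploys, when a terminating pod from the previous Deployment can still be in phase Running.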
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces scheduled-backup-26149'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces scheduled-backup-26149
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace scheduled-backup-26149
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.LggRL7YB2Y
+ local LAST_OUT=/tmp/tmp.WfFTP5hooi
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.QuveVSVqVN
+ local exit_status=0
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.KIQz410zKN
+ local exit_status=0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace scheduled-backup-26149
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.LggRL7YB2Y
+ cat /tmp/tmp.QuveVSVqVN
+ rm /tmp/tmp.LggRL7YB2Y /tmp/tmp.QuveVSVqVN
+ return 0
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace scheduled-backup-26149
error: resource(s) were provided, but no name was specified
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace scheduled-backup-26149
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.WfFTP5hooi
+ cat /tmp/tmp.KIQz410zKN
Error from server (NotFound): namespaces "scheduled-backup-26149" not found
+ rm /tmp/tmp.WfFTP5hooi /tmp/tmp.KIQz410zKN
+ return 1
+ :
+ wait_for_delete namespace/scheduled-backup-26149
+ local res=namespace/scheduled-backup-26149
+ echo -n 'waiting for namespace/scheduled-backup-26149 to be deleted'
waiting for namespace/scheduled-backup-26149 to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "scheduled-backup-26149" not found
+ desc 'create namespace scheduled-backup-26149'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace scheduled-backup-26149
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace scheduled-backup-26149
++ mktemp
+ local LAST_OUT=/tmp/tmp.na7oSiZhXu
++ mktemp
+ local LAST_ERR=/tmp/tmp.fZ5wpyuWxW
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace scheduled-backup-26149
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.na7oSiZhXu
namespace/scheduled-backup-26149 created
+ cat /tmp/tmp.fZ5wpyuWxW
+ rm /tmp/tmp.na7oSiZhXu /tmp/tmp.fZ5wpyuWxW
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PxeJfusYCf
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KeoRyuvH6y
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.PxeJfusYCf
++ cat /tmp/tmp.KeoRyuvH6y
++ rm /tmp/tmp.PxeJfusYCf /tmp/tmp.KeoRyuvH6y
++ return 0
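Note how the namespace bounce above tolerates failure: all three delete attempts hit NotFound (the target namespace never existed, and the previous run's namespace was already reaped by the bulk `xargs kubectl delete ns`), kubectl_bin returns 1, and the caller discards the failure with `+ :` before recreating the namespace. The same delete-wait-create pattern in stock commands; wait_for_delete in the harness polls with its own loop, so `kubectl wait --for=delete` here is an off-the-shelf stand-in, not the harness's code:

    kubectl delete namespace "$ns" || :                            # NotFound is fine
    kubectl wait --for=delete "namespace/$ns" --timeout=120s || :  # no-op if already gone
    kubectl create namespace "$ns"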
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2 --namespace=scheduled-backup-26149
++ mktemp
+ local LAST_OUT=/tmp/tmp.f306pWBwyS
++ mktemp
+ local LAST_ERR=/tmp/tmp.usDcok35hY
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2 --namespace=scheduled-backup-26149
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.f306pWBwyS
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2434-3b65d7fe-1-cluster2" modified.
+ cat /tmp/tmp.usDcok35hY
+ rm /tmp/tmp.f306pWBwyS /tmp/tmp.usDcok35hY
+ return 0
+ apply_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.eyXHB8lZKK
++ mktemp
+ local LAST_ERR=/tmp/tmp.rpPwjoXjBd
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.eyXHB8lZKK
secret/minio-secret created
secret/aws-s3-secret created
secret/do-spaces-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.rpPwjoXjBd
+ rm /tmp/tmp.eyXHB8lZKK /tmp/tmp.rpPwjoXjBd
+ return 0
+ start_minio
+ deploy_helm scheduled-backup-26149
+ helm repo add hashicorp https://helm.releases.hashicorp.com
"hashicorp" already exists with the same configuration, skipping
+ helm repo add minio https://charts.min.io/
"minio" already exists with the same configuration, skipping
+ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "minio" chart repository
...Successfully got an update from the "chaos-mesh" chart repository
...Successfully got an update from the "percona" chart repository
...Successfully got an update from the "hashicorp" chart repository
Update Complete. ⎈Happy Helming!⎈
+ local cert_secret=
+ local endpoint=http://minio-service:9000
+ minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G)
+ local minio_args
+ [[ -n '' ]]
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio
NAME: minio-service
LAST DEPLOYED: Thu Apr 9 04:09:41 2026
NAMESPACE: scheduled-backup-26149
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.scheduled-backup-26149.cluster.local

To access MinIO from localhost, run the below commands:

1. export POD_NAME=$(kubectl get pods --namespace scheduled-backup-26149 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace scheduled-backup-26149

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart

2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace scheduled-backup-26149 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace scheduled-backup-26149 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000

3. mc ls minio-service-local
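The helm install is wrapped in `retry 10 60 ...`. Only the happy path runs here, so the trace exposes just the argument handling (max=10, delay=60, shift 2, n=1); the loop below is a plausible reconstruction of the rest, not the harness's verbatim code:

    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: command failed after $n attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }

The preceding `helm uninstall minio-service || :` style cleanup makes the install idempotent across reruns of the suite.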
+ sleep 30
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vVBQ55fKhG
+++ mktemp
++ local LAST_ERR=/tmp/tmp.huuIfcoRaL
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.vVBQ55fKhG
++ cat /tmp/tmp.huuIfcoRaL
++ rm /tmp/tmp.vVBQ55fKhG /tmp/tmp.huuIfcoRaL
++ return 0
+ MINIO_POD=minio-service-5fd5489bdc-bnd6s
+ wait_pod minio-service-5fd5489bdc-bnd6s
+ local pod=minio-service-5fd5489bdc-bnd6s
+ local max_retry=480
+ local ns=
++ echo minio-service-5fd5489bdc-bnd6s
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/minio-service-5fd5489bdc-bnd6s condition met
waiting for pod/minio-service-5fd5489bdc-bnd6s to become Ready.Ok
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing
++ mktemp
+ local LAST_OUT=/tmp/tmp.gdRvAN2SSR
++ mktemp
+ local LAST_ERR=/tmp/tmp.5CzBuGKzr5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.gdRvAN2SSR
pod "aws-cli" deleted from scheduled-backup-26149 namespace
+ cat /tmp/tmp.5CzBuGKzr5
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter.
+ rm /tmp/tmp.gdRvAN2SSR /tmp/tmp.5CzBuGKzr5
+ return 0
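The throwaway aws-cli pod above is the whole bucket bootstrap: one `kubectl run -i --rm` against the in-cluster MinIO endpoint, using the test credentials from minio-secret (hence the warning that the session, credentials included, lands in container logs). A follow-up check in the same style; this verification step is not in the log:

    kubectl run -i --rm aws-cli-check --image=perconalab/awscli --restart=Never -- \
        /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
        AWS_DEFAULT_REGION=us-east-1 \
        /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 ls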
+ cluster=scheduled-backup
+ cat -
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.NfBKmT6Hjv
++ mktemp
+ local LAST_ERR=/tmp/tmp.leArIYT0JM
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.NfBKmT6Hjv
priorityclass.scheduling.k8s.io/high-priority configured
+ cat /tmp/tmp.leArIYT0JM
+ rm /tmp/tmp.NfBKmT6Hjv /tmp/tmp.leArIYT0JM
+ return 0
+ spinup_pxc scheduled-backup /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ local cluster=scheduled-backup
+ local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ local size=3
+ local sleep=10
+ local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/secrets.yml
+ local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml
+ local port=3306
+ desc 'create first PXC cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PXC cluster
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.RVYcSTi73M
++ mktemp
+ local LAST_ERR=/tmp/tmp.zBuu1hU7Y6
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.RVYcSTi73M
secret/my-cluster-secrets created
secret/some-name-ssl created
secret/some-name-ssl-internal created
+ cat /tmp/tmp.zBuu1hU7Y6
+ rm /tmp/tmp.RVYcSTi73M /tmp/tmp.zBuu1hU7Y6
+ return 0
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml
+ local pvc_name=
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/conf/client.yml
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ local LAST_OUT=/tmp/tmp.ob8CrzS05L
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ local LAST_ERR=/tmp/tmp.JxsfXKoOod
+ local exit_status=0
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-26149~
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2434-3b65d7fe#'
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ob8CrzS05L
deployment.apps/pxc-client created
+ cat /tmp/tmp.JxsfXKoOod
+ rm /tmp/tmp.ob8CrzS05L /tmp/tmp.JxsfXKoOod
+ return 0
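cat_config is the harness's image-pinning stage: every manifest streams through a stack of single-expression sed processes that pin apiVersion to v1 and rewrite each image suffix (-init, -haproxy, -proxysql, -backup, -pmm, -logcollector, bare -pxc) to the tag under test, plus a namespace fix-up for the MinIO endpoint. An abridged linearization (patterns copied from the trace; the pmm, logcollector, claimName, and apply stages are dropped here for brevity):

    cat "$input_file" \
        | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2434-3b65d7fe#' \
        | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
        | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' \
        | /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-26149~ \
        | kubectl apply -f -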
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ local pvc_name=
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2434/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ local LAST_OUT=/tmp/tmp.vHZCBg9b8N
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2434-3b65d7fe#'
++ mktemp
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-26149~
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ local LAST_ERR=/tmp/tmp.Oi77crhRCf
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.vHZCBg9b8N
perconaxtradbcluster.pxc.percona.com/scheduled-backup created
+ cat /tmp/tmp.Oi77crhRCf
+ rm /tmp/tmp.vHZCBg9b8N /tmp/tmp.Oi77crhRCf
+ return 0
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
++ get_proxy scheduled-backup
++ local target_cluster=scheduled-backup
+++ kubectl_bin get pxc scheduled-backup -o 'jsonpath={.spec.haproxy.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.Vz3emwLvCE
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.2Z5c19OzRj
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc scheduled-backup -o 'jsonpath={.spec.haproxy.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.Vz3emwLvCE
+++ cat /tmp/tmp.2Z5c19OzRj
+++ rm /tmp/tmp.Vz3emwLvCE /tmp/tmp.2Z5c19OzRj
+++ return 0
++ [[ '' == \t\r\u\e ]]
+++ kubectl_bin get pxc scheduled-backup -o 'jsonpath={.spec.proxysql.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.70r4jxLZg8
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.DPU0s35RUj
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc scheduled-backup -o 'jsonpath={.spec.proxysql.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.70r4jxLZg8
+++ cat /tmp/tmp.DPU0s35RUj
+++ rm /tmp/tmp.70r4jxLZg8 /tmp/tmp.DPU0s35RUj
+++ return 0
++ [[ true == \t\r\u\e ]]
++ echo scheduled-backup-proxysql
++ return
+ local proxy=scheduled-backup-proxysql
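get_proxy resolves which front-end the cluster runs: it checks .spec.haproxy.enabled first (empty here), then .spec.proxysql.enabled (true), and answers scheduled-backup-proxysql. A sketch reconstructed from the trace; the haproxy-branch echo and the final fallback are assumptions, since this run never reaches them:

    get_proxy() {
        local target_cluster=$1
        if [[ $(kubectl get pxc "$target_cluster" -o 'jsonpath={.spec.haproxy.enabled}') == "true" ]]; then
            echo "$target_cluster-haproxy"    # assumed name; not exercised in this log
            return
        fi
        if [[ $(kubectl get pxc "$target_cluster" -o 'jsonpath={.spec.proxysql.enabled}') == "true" ]]; then
            echo "$target_cluster-proxysql"   # the branch taken in this run
            return
        fi
        echo "$target_cluster-pxc"            # assumed fallback when no proxy is enabled
    }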
+ kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-26149
++ mktemp
+ local LAST_OUT=/tmp/tmp.M15aHBmYm9
++ mktemp
+ local LAST_ERR=/tmp/tmp.vpasrZCvhO
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-26149
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-26149
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-26149
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.M15aHBmYm9
+ cat /tmp/tmp.vpasrZCvhO
error: no matching resources found
+ rm /tmp/tmp.M15aHBmYm9 /tmp/tmp.vpasrZCvhO
+ return 1
+ true
+ wait_for_running scheduled-backup-proxysql 1
+ local name=scheduled-backup-proxysql
+ let last_pod=0
+ :
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ wait_pod scheduled-backup-proxysql-0 480
+ local pod=scheduled-backup-proxysql-0
+ local max_retry=480
+ local ns=
++ echo scheduled-backup-proxysql-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=proxysql
+ set +o xtrace
pod/scheduled-backup-proxysql-0 condition met
waiting for pod/scheduled-backup-proxysql-0 to become Ready.Ok
+ wait_for_running scheduled-backup-pxc 3
+ local name=scheduled-backup-pxc
+ let last_pod=2
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ wait_pod scheduled-backup-pxc-0 480
+ local pod=scheduled-backup-pxc-0
+ local max_retry=480
+ local ns=
++ echo scheduled-backup-pxc-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/scheduled-backup-pxc-0 condition met
waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok
+ for i in '$(seq 0 $last_pod)'
+ wait_pod scheduled-backup-pxc-1 480
+ local pod=scheduled-backup-pxc-1
+ local max_retry=480
+ local ns=
++ grep -E '^(pxc|proxysql)$'
++ echo scheduled-backup-pxc-1
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+ local container=pxc
+ set +o xtrace
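wait_pod derives which container to check from the pod name itself: strip everything up to the `-pxc-N`/`-proxysql-N` suffix and keep the result only if it is exactly pxc or proxysql; for the operator and MinIO pods earlier in the log the match fails and container stays empty. The readiness polling itself runs under `set +o xtrace`, so only this detection step can be reconstructed from the trace:

    pod=scheduled-backup-pxc-1
    container=$(echo "$pod" | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | grep -E '^(pxc|proxysql)$' || :)
    # container is now "pxc"; for e.g. minio-service-5fd5489bdc-bnd6s it would be empty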