Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/logs/limits-8-0.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ main
+ create_infra limits-9219
+ local ns=limits-9219
+ '[' -n pxc-operator ']'
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ kubectl patch pxc -n limits-20912 no-limits --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/no-limits patched (no change)
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.8yASyPgYAf
++ mktemp
+ local LAST_ERR=/tmp/tmp.Lbn6qyzEVx
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.8yASyPgYAf
perconaxtradbcluster.pxc.percona.com "no-limits" deleted from limits-20912 namespace
+ cat /tmp/tmp.Lbn6qyzEVx
+ rm /tmp/tmp.8yASyPgYAf /tmp/tmp.Lbn6qyzEVx
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.saER6BxQ65
++ mktemp
+ local LAST_ERR=/tmp/tmp.MFa6rK8X1Z
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.saER6BxQ65
No resources found
+ cat /tmp/tmp.MFa6rK8X1Z
+ rm /tmp/tmp.saER6BxQ65 /tmp/tmp.MFa6rK8X1Z
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.0zpnY8BROt
++ mktemp
+ local LAST_ERR=/tmp/tmp.hVAKZbT8m9
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.0zpnY8BROt
No resources found
+ cat /tmp/tmp.hVAKZbT8m9
+ rm /tmp/tmp.0zpnY8BROt /tmp/tmp.hVAKZbT8m9
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get MutatingWebhookConfiguration
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl api-resources
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
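
A note on the finalizer-clearing step at the top of this run: stale pxc custom resources carry finalizers that can block deletion, so the harness empties them before the bulk delete. The same pattern, isolated (taken directly from the trace; $0 and $1 inside sh -xc receive the namespace and name columns that xargs -L 1 splits off each output line):

    # Strip finalizers from every PerconaXtraDBCluster in every namespace,
    # so that the subsequent bulk delete cannot hang on a stuck finalizer.
    kubectl get pxc --all-namespaces -o wide \
      | grep -v NAMESPACE \
      | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
    kubectl delete pxc --all --all-namespaces
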
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pxc-operator
+ xargs kubectl delete ns
++ mktemp
+ kubectl_bin get ns
+ awk '{print$1}'
+ local LAST_OUT=/tmp/tmp.7NM82lsJQw
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
++ mktemp
+ local LAST_ERR=/tmp/tmp.r1vya8VOvE
+ local exit_status=0
++ mktemp
+ local LAST_OUT=/tmp/tmp.MRXiRtNqtE
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pxc-operator
++ mktemp
+ local LAST_ERR=/tmp/tmp.mHzx8SnPlz
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.MRXiRtNqtE
+ cat /tmp/tmp.mHzx8SnPlz
+ rm /tmp/tmp.MRXiRtNqtE /tmp/tmp.mHzx8SnPlz
+ return 0
namespace "limits-20912" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.7NM82lsJQw
namespace "pxc-operator" deleted
+ cat /tmp/tmp.r1vya8VOvE
+ rm /tmp/tmp.7NM82lsJQw /tmp/tmp.r1vya8VOvE
+ return 0
+ wait_for_delete namespace/pxc-operator
+ local res=namespace/pxc-operator
+ echo -n 'waiting for namespace/pxc-operator to be deleted'
waiting for namespace/pxc-operator to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "pxc-operator" not found
+ desc 'create namespace pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.VtKtaRwJEx
++ mktemp
+ local LAST_ERR=/tmp/tmp.ycPXrPqjYg
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.VtKtaRwJEx
namespace/pxc-operator created
+ cat /tmp/tmp.ycPXrPqjYg
+ rm /tmp/tmp.VtKtaRwJEx /tmp/tmp.ycPXrPqjYg
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.r593tfFFz7
+++ mktemp
++ local LAST_ERR=/tmp/tmp.oWfNogXU00
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.r593tfFFz7
++ cat /tmp/tmp.oWfNogXU00
++ rm /tmp/tmp.r593tfFFz7 /tmp/tmp.oWfNogXU00
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2 --namespace=pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.Ylzjoucxvd
++ mktemp
+ local LAST_ERR=/tmp/tmp.NYi9e7ZGV0
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2 --namespace=pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Ylzjoucxvd
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2" modified.
+ cat /tmp/tmp.NYi9e7ZGV0
+ rm /tmp/tmp.Ylzjoucxvd /tmp/tmp.NYi9e7ZGV0
+ return 0
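
Every kubectl invocation in this log goes through a kubectl_bin wrapper, whose internals produce the mktemp/seq/set +e noise above. A minimal sketch reconstructed from the trace; the real helper lives in the harness's shared functions, and the extra '[' 1 == 1 ']' guard visible in failing iterations later in this log is omitted here because its meaning is not recoverable from the trace alone:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                      # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0                              # back-off is 0s in this run
                continue
            fi
            break
        done
        cat "$LAST_OUT"                              # replay captured stdout
        cat "$LAST_ERR"                              # replay captured stderr
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
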
+ deploy_operator
+ desc 'start PXC operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6oh5sHbZrC
++ mktemp
+ local LAST_ERR=/tmp/tmp.UUtrAoEaQQ
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6oh5sHbZrC
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
+ cat /tmp/tmp.UUtrAoEaQQ
+ rm /tmp/tmp.6oh5sHbZrC /tmp/tmp.UUtrAoEaQQ
+ return 0
+ '[' -n pxc-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=pxc-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: pxc-operator^'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.7wBja7yiDb
++ mktemp
+ local LAST_ERR=/tmp/tmp.Uy0rMvGQen
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.7wBja7yiDb
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
+ cat /tmp/tmp.Uy0rMvGQen
+ rm /tmp/tmp.7wBja7yiDb /tmp/tmp.Uy0rMvGQen
+ return 0
+ sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4^'
+ sed -e 's^failureThreshold: .*^failureThreshold: 10^'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-operator.yaml
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' -
+ kubectl_bin apply -f -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' -
++ mktemp
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' -
+ local LAST_OUT=/tmp/tmp.oPe59b5U1U
++ mktemp
+ local LAST_ERR=/tmp/tmp.z9FIM5VsxD
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.oPe59b5U1U
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
+ cat /tmp/tmp.z9FIM5VsxD
+ rm /tmp/tmp.oPe59b5U1U /tmp/tmp.z9FIM5VsxD
+ return 0
+ sleep 10
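
deploy_operator patches the operator manifest in flight rather than keeping a per-PR copy: sed swaps in the PR image and probe threshold, and yq passes force DISABLE_TELEMETRY, LOG_LEVEL, and PXCO_FEATURE_GATES. Condensed from the trace into a strict pipeline (the expressions are copied verbatim; the relative path and the pipeline ordering are a simplification of the interleaved trace above):

    cat deploy/cw-operator.yaml \
      | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4^' \
      | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
      | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
      | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
      | kubectl apply -f -
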
+ kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
++ mktemp
+ local LAST_OUT=/tmp/tmp.MkFYwtd1fK
++ mktemp
+ local LAST_ERR=/tmp/tmp.fxlqKYav1t
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.MkFYwtd1fK
pod/percona-xtradb-cluster-operator-944bd69c8-mh9jq condition met
+ cat /tmp/tmp.fxlqKYav1t
+ rm /tmp/tmp.MkFYwtd1fK /tmp/tmp.fxlqKYav1t
+ return 0
++ get_operator_pod
++ local label_prefix=app.kubernetes.io/
+++ grep -c percona-xtradb-cluster-operator
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name'
++ head -1
+++ mktemp
++ local LAST_OUT=/tmp/tmp.RZxxvKoaGg
+++ mktemp
++ local LAST_ERR=/tmp/tmp.lIEnfa1fUk
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.RZxxvKoaGg
++ cat /tmp/tmp.lIEnfa1fUk
++ rm /tmp/tmp.RZxxvKoaGg /tmp/tmp.lIEnfa1fUk
++ return 0
+ wait_pod percona-xtradb-cluster-operator-944bd69c8-mh9jq 480 pxc-operator
+ local pod=percona-xtradb-cluster-operator-944bd69c8-mh9jq
+ local max_retry=480
+ local ns=pxc-operator
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ echo percona-xtradb-cluster-operator-944bd69c8-mh9jq
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/percona-xtradb-cluster-operator-944bd69c8-mh9jq condition met
waiting for pod/percona-xtradb-cluster-operator-944bd69c8-mh9jq to become Ready.Ok
+ sleep 3
+ create_namespace limits-9219
+ local namespace=limits-9219
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get MutatingWebhookConfiguration
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl api-resources
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
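
The get_operator_pod trace above shows how the harness resolves the operator pod name: it asks for Running pods only and lets jq drop any pod that already has a deletionTimestamp, so a terminating pod from a previous deploy is never picked. Isolated from the trace:

    # Resolve the current operator pod, skipping pods that are terminating.
    kubectl get pods \
        --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
        --field-selector=status.phase=Running -o json -n pxc-operator \
      | jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' \
      | head -1
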
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces limits-9219'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces limits-9219
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace limits-9219
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.XJrvFghAEC
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.SPpWat0tQw
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.Am2aedD2ns
+ local LAST_ERR=/tmp/tmp.RVHH4UFgkN
+ local exit_status=0
+ local exit_status=0
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace limits-9219
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.SPpWat0tQw
+ cat /tmp/tmp.RVHH4UFgkN
+ rm /tmp/tmp.SPpWat0tQw /tmp/tmp.RVHH4UFgkN
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace limits-9219
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace limits-9219
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.XJrvFghAEC
+ cat /tmp/tmp.Am2aedD2ns
Error from server (NotFound): namespaces "limits-9219" not found
+ rm /tmp/tmp.XJrvFghAEC /tmp/tmp.Am2aedD2ns
+ return 1
+ :
+ wait_for_delete namespace/limits-9219
+ local res=namespace/limits-9219
+ echo -n 'waiting for namespace/limits-9219 to be deleted'
waiting for namespace/limits-9219 to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "limits-9219" not found
+ desc 'create namespace limits-9219'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace limits-9219
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace limits-9219
++ mktemp
+ local LAST_OUT=/tmp/tmp.Y4mIyXy6nO
++ mktemp
+ local LAST_ERR=/tmp/tmp.wCFElXQSEG
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace limits-9219
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Y4mIyXy6nO
namespace/limits-9219 created
+ cat /tmp/tmp.wCFElXQSEG
+ rm /tmp/tmp.Y4mIyXy6nO /tmp/tmp.wCFElXQSEG
+ return 0
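
wait_for_delete prints its banner and then switches off xtrace, so the polling loop itself never appears in the log; only the final NotFound line does. A plausible reconstruction of such a poller (the loop bound and sleep interval are assumptions, not taken from the harness):

    wait_for_delete() {
        local res="$1"
        echo -n "waiting for $res to be deleted"
        set +o xtrace
        # Poll until the resource is gone; 120 iterations is an assumed bound.
        local i
        for i in $(seq 1 120); do
            kubectl get "$res" >/dev/null 2>&1 || return 0   # NotFound => deleted
            echo -n .
            sleep 1
        done
        return 1
    }
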
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.yz8lbFxjPE
+++ mktemp
++ local LAST_ERR=/tmp/tmp.FJ4WruZ7uh
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.yz8lbFxjPE
++ cat /tmp/tmp.FJ4WruZ7uh
++ rm /tmp/tmp.yz8lbFxjPE /tmp/tmp.FJ4WruZ7uh
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2 --namespace=limits-9219
++ mktemp
+ local LAST_OUT=/tmp/tmp.p46SQ1pqlS
++ mktemp
+ local LAST_ERR=/tmp/tmp.GKXU9AlOzf
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2 --namespace=limits-9219
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.p46SQ1pqlS
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster2" modified.
+ cat /tmp/tmp.GKXU9AlOzf
+ rm /tmp/tmp.p46SQ1pqlS /tmp/tmp.GKXU9AlOzf
+ return 0
+ apply_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.FIsurXHkC4
++ mktemp
+ local LAST_ERR=/tmp/tmp.mt87gCvmEv
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.FIsurXHkC4
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.mt87gCvmEv
+ rm /tmp/tmp.FIsurXHkC4 /tmp/tmp.mt87gCvmEv
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6g0tZ925Lw
++ mktemp
+ local LAST_ERR=/tmp/tmp.vTIRki0uLQ
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6g0tZ925Lw
secret/my-cluster-secrets created
secret/some-name-ssl created
secret/some-name-ssl-internal created
+ cat /tmp/tmp.vTIRki0uLQ
+ rm /tmp/tmp.6g0tZ925Lw /tmp/tmp.vTIRki0uLQ
+ return 0
+ desc 'check if possible to create cluster without CPU/Memory limits'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if possible to create cluster without CPU/Memory limits
-----------------------------------------------------------------------------------
+ check_cr_config no-limits
+ local cluster=no-limits
+ desc 'apply cr config'
+ set +o xtrace
-----------------------------------------------------------------------------------
apply cr config
-----------------------------------------------------------------------------------
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/conf/no-limits.yml
+ local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/conf/no-limits.yml
+ local pvc_name=
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/conf/no-limits.yml ''
+ local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/conf/no-limits.yml
+ local pvc_name=
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/conf/no-limits.yml
++ mktemp
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ local LAST_OUT=/tmp/tmp.GBY8xG2NYz
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #'
++ mktemp
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.limits-9219~
+ local LAST_ERR=/tmp/tmp.rUR7epQGfE
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.GBY8xG2NYz
perconaxtradbcluster.pxc.percona.com/no-limits created
+ cat /tmp/tmp.rUR7epQGfE
+ rm /tmp/tmp.GBY8xG2NYz /tmp/tmp.rUR7epQGfE
+ return 0
+ wait_for_running no-limits-pxc 1
+ local name=no-limits-pxc
+ let last_pod=0
+ :
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ wait_pod no-limits-pxc-0 480
+ local pod=no-limits-pxc-0
+ local max_retry=480
+ local ns=
++ echo no-limits-pxc-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/no-limits-pxc-0 condition met
waiting for pod/no-limits-pxc-0 to become Ready.Ok
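
apply_config rewrites the checked-in CR manifest on the fly: every image reference (pxc, init, proxysql, haproxy, logcollector, backup, pmm) is redirected to the build under test, the backup PVC claim is blanked, and the minio endpoint is pinned to the test namespace. Condensed into a strict pipeline (expressions copied verbatim from the trace; the relative path and the ordering of the interleaved trace are simplified):

    cat e2e-tests/limits/conf/no-limits.yml \
      | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
      | /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
      | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' \
      | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
      | /usr/bin/sed -e s~minio-service.#namespace~minio-service.limits-9219~ \
      | kubectl apply -f -
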
+ desc 'check if statefulset created with expected config'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if statefulset created with expected config
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/no-limits-pxc
+ local resource=statefulset/no-limits-pxc
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc.yml
+ local new_result=/tmp/tmp.glTg5k2Ec8/statefulset_no-limits-pxc.yml
+ desc 'compare statefulset/no-limits-pxc-'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare statefulset/no-limits-pxc-
-----------------------------------------------------------------------------------
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-eks.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-80.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ version_gt 1.33
++ echo '1.32 >= 1.33'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.29
++ echo '1.32 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k129.yml ']'
+ version_gt 1.27
++ echo '1.32 >= 1.27'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127.yml ']'
+ expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127.yml
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127-oc.yml ']'
+ version_gt 1.29
++ echo '1.32 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127-k129-oc.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127-eks.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127-aks.yml ']'
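
compare_kubectl resolves which expected file to diff against by probing progressively more specific variants (PXC major version, then Kubernetes minor version via version_gt, then platform suffixes such as -oc/-eks/-aks) and keeping the most specific file that exists on disk; in this run statefulset_no-limits-pxc-k127.yml won. An illustrative sketch of both pieces, not the harness's own code (KUBE_VERSION as a variable name is an assumption; the log only shows its expanded value, 1.32):

    version_gt() {
        # Returns 0 (true) when the server version is >= $1; bc -l prints 1/0.
        [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]
    }

    base=e2e-tests/limits/compare/statefulset_no-limits-pxc.yml
    expected_result=$base
    if version_gt 1.29 && [ -f "${base%.yml}-k129.yml" ]; then
        expected_result=${base%.yml}-k129.yml
    elif version_gt 1.27 && [ -f "${base%.yml}-k127.yml" ]; then
        expected_result=${base%.yml}-k127.yml
    fi
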
+ kubectl_bin get -o yaml statefulset/no-limits-pxc
++ mktemp
+ yq eval '
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) |
    del(.metadata.selfLink) |
    del(.metadata.deletionTimestamp) |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.metadata.annotations."kubernetes.io/psp") |
    del(.metadata.annotations."batch.kubernetes.io/job-tracking") |
    del(.metadata.labels."batch.kubernetes.io/job-name") |
    del(.metadata.labels."job-name") |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."k8s.v1.cni.cncf.io*") |
    del(.metadata.annotations."k8s.ovn.org/pod-networks") |
    del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") |
    del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") |
    del(.spec.template.metadata.annotations."last-applied-secret") |
    del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") |
    del(.spec.template.metadata.labels."job-name") |
    del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.spec.nodeName) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
    del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
    del(.. | select(has("nodePort")).nodePort) |
    del(.. | select(has("imagePullSecrets")).imagePullSecrets) |
    del(.. | select(has("enableServiceLinks")).enableServiceLinks) |
    del(.status) |
    del(.spec.volumeClaimTemplates[].apiVersion) |
    del(.spec.volumeClaimTemplates[].kind) |
    del(.metadata.ownerReferences[].apiVersion) |
    del(.. | select(has("controller-uid")).controller-uid) |
    del(.. | select(has("preemptionPolicy")).preemptionPolicy) |
    del(.spec.ipFamilies) |
    del(.spec.ipFamilyPolicy) |
    (.. | select(. == "policy/v1beta1")) = "policy/v1" |
    del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") |
    (.. | select(tag == "!!str")) |= sub("limits-9219", "namespace") |
    (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") |
    del(.. | select(has("annotations")).annotations | select(length==0)) |
    del(.spec.crVersion) |
    del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' -
+ local LAST_OUT=/tmp/tmp.72ADzTcVfr
++ mktemp
+ local LAST_ERR=/tmp/tmp.H4aHPffOtW
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/no-limits-pxc
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.72ADzTcVfr
+ cat /tmp/tmp.H4aHPffOtW
+ rm /tmp/tmp.72ADzTcVfr /tmp/tmp.H4aHPffOtW
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-pxc-k127.yml /tmp/tmp.glTg5k2Ec8/statefulset_no-limits-pxc.yml
+ log 'compare_kubectl: statefulset/no-limits-pxc OK'
++ date +%Y-%m-%dT%H:%M:%S%z
+ echo '[2026-03-11T09:01:34+0000]' compare_kubectl: statefulset/no-limits-pxc OK
[2026-03-11T09:01:34+0000] compare_kubectl: statefulset/no-limits-pxc OK
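
The long yq filter above normalizes the live StatefulSet before diffing it against the checked-in expected file: server-populated fields (uid, resourceVersion, status), per-build values (every image), and per-run values (the random test namespace) are all stripped so only operator-controlled spec content is compared. A condensed sketch of the idea; the log shows the full clause list, and expected.yml here stands in for the resolved compare file:

    kubectl get -o yaml statefulset/no-limits-pxc \
      | yq eval 'del(.metadata.managedFields)
          | del(.. | select(has("uid")).uid)
          | del(.. | select(has("image")).image)
          | del(.status)
          | (.. | select(tag == "!!str")) |= sub("limits-9219", "namespace")' - \
        > /tmp/normalized.yml
    diff -u expected.yml /tmp/normalized.yml
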
+ compare_kubectl statefulset/no-limits-proxysql
+ local resource=statefulset/no-limits-proxysql
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql.yml
+ local new_result=/tmp/tmp.glTg5k2Ec8/statefulset_no-limits-proxysql.yml
+ desc 'compare statefulset/no-limits-proxysql-'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare statefulset/no-limits-proxysql-
-----------------------------------------------------------------------------------
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-eks.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-80.yml ']'
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]]
+ version_gt 1.33
++ echo '1.32 >= 1.33'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.29
++ echo '1.32 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k129.yml ']'
+ version_gt 1.27
++ echo '1.32 >= 1.27'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127.yml ']'
+ expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127.yml
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127-oc.yml ']'
+ version_gt 1.29
++ echo '1.32 >= 1.29'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127-k129-oc.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127-eks.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127-aks.yml ']'
+ kubectl_bin get -o yaml statefulset/no-limits-proxysql
++ mktemp
+ yq eval '
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) |
    del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) |
    del(.metadata.selfLink) |
    del(.metadata.deletionTimestamp) |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.metadata.annotations."kubernetes.io/psp") |
    del(.metadata.annotations."batch.kubernetes.io/job-tracking") |
    del(.metadata.labels."batch.kubernetes.io/job-name") |
    del(.metadata.labels."job-name") |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."k8s.v1.cni.cncf.io*") |
    del(.metadata.annotations."k8s.ovn.org/pod-networks") |
    del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") |
    del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") |
    del(.spec.template.metadata.annotations."last-applied-secret") |
    del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") |
    del(.spec.template.metadata.labels."job-name") |
    del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.spec.nodeName) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
    del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
    del(.. | select(has("nodePort")).nodePort) |
    del(.. | select(has("imagePullSecrets")).imagePullSecrets) |
    del(.. | select(has("enableServiceLinks")).enableServiceLinks) |
    del(.status) |
    del(.spec.volumeClaimTemplates[].apiVersion) |
    del(.spec.volumeClaimTemplates[].kind) |
    del(.metadata.ownerReferences[].apiVersion) |
    del(.. | select(has("controller-uid")).controller-uid) |
    del(.. | select(has("preemptionPolicy")).preemptionPolicy) |
    del(.spec.ipFamilies) |
    del(.spec.ipFamilyPolicy) |
    (.. | select(. == "policy/v1beta1")) = "policy/v1" |
    del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") |
    (.. | select(tag == "!!str")) |= sub("limits-9219", "namespace") |
    (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") |
    del(.. | select(has("annotations")).annotations | select(length==0)) |
    del(.spec.crVersion) |
    del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' -
+ local LAST_OUT=/tmp/tmp.8FsswyWzXb
++ mktemp
+ local LAST_ERR=/tmp/tmp.5lVc5pPOhS
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/no-limits-proxysql
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.8FsswyWzXb
+ cat /tmp/tmp.5lVc5pPOhS
+ rm /tmp/tmp.8FsswyWzXb /tmp/tmp.5lVc5pPOhS
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127.yml /tmp/tmp.glTg5k2Ec8/statefulset_no-limits-proxysql.yml
--- /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127.yml	2026-03-11 07:25:23.062303361 +0000
+++ /tmp/tmp.glTg5k2Ec8/statefulset_no-limits-proxysql.yml	2026-03-11 09:01:34.727608092 +0000
@@ -122,6 +122,8 @@
           value: "5"
         - name: PMM_AGENT_PATHS_TEMPDIR
           value: /tmp
+        - name: PROXYSQL_ADMIN_TLS
+          value: "true"
         envFrom:
         - secretRef:
             name: no-limits-env-vars-proxysql
++ caller
+ log 'compare_kubectl (20 e2e-tests/limits/run) failed with diff'
++ date +%Y-%m-%dT%H:%M:%S%z
+ echo '[2026-03-11T09:01:34+0000]' compare_kubectl '(20' 'e2e-tests/limits/run)' failed with diff
[2026-03-11T09:01:34+0000] compare_kubectl (20 e2e-tests/limits/run) failed with diff
+ exit 1
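
Why this run failed: the only difference compare_kubectl found is a PROXYSQL_ADMIN_TLS environment variable (value "true") present in the live no-limits-proxysql StatefulSet but absent from the checked-in expected file statefulset_no-limits-proxysql-k127.yml, so the diff is non-empty and the test exits 1. If that env var is an intentional operator change rather than a regression, the usual remedy is to refresh the expected file from the normalized output this run already produced (paths taken from the log; verify the new env var is expected before committing):

    # Assuming PROXYSQL_ADMIN_TLS is an intended addition, refresh the
    # expected compare file from this run's normalized output:
    cp /tmp/tmp.glTg5k2Ec8/statefulset_no-limits-proxysql.yml \
       e2e-tests/limits/compare/statefulset_no-limits-proxysql-k127.yml
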