Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2428/e2e-tests/logs/users-5-7.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ create_infra users-13453
+ local ns=users-13453
+ '[' -n pxc-operator ']'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n users-6249 some-name --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/some-name patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.5hBxCnqMQV
++ mktemp
+ local LAST_ERR=/tmp/tmp.3qUieaQBun
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.5hBxCnqMQV
perconaxtradbcluster.pxc.percona.com "some-name" deleted from users-6249 namespace
+ cat /tmp/tmp.3qUieaQBun
+ rm /tmp/tmp.5hBxCnqMQV /tmp/tmp.3qUieaQBun
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.HwFI5IuFfp
++ mktemp
+ local LAST_ERR=/tmp/tmp.mBEONOSnxp
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.HwFI5IuFfp
No resources found
+ cat /tmp/tmp.mBEONOSnxp
+ rm /tmp/tmp.HwFI5IuFfp /tmp/tmp.mBEONOSnxp
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.6uv4Nja673
++ mktemp
+ local LAST_ERR=/tmp/tmp.oBD8luGns5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6uv4Nja673
No resources found
+ cat /tmp/tmp.oBD8luGns5
+ rm /tmp/tmp.6uv4Nja673 /tmp/tmp.oBD8luGns5
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ awk '{print $1}'
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pxc-operator'
+ xargs kubectl delete ns
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pxc-operator
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.WOzJjNbgcz
+ local LAST_OUT=/tmp/tmp.AnzRcJhLSm
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.r5SAsGOqnx
+ local exit_status=0
+ local LAST_ERR=/tmp/tmp.Vdzf7bhiIr
+ local exit_status=0
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pxc-operator
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.AnzRcJhLSm
+ cat /tmp/tmp.Vdzf7bhiIr
+ rm /tmp/tmp.AnzRcJhLSm /tmp/tmp.Vdzf7bhiIr
+ return 0
namespace "users-6249" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.WOzJjNbgcz
namespace "pxc-operator" deleted
+ cat /tmp/tmp.r5SAsGOqnx
+ rm /tmp/tmp.WOzJjNbgcz /tmp/tmp.r5SAsGOqnx
+ return 0
+ wait_for_delete namespace/pxc-operator
+ local res=namespace/pxc-operator
+ echo -n 'waiting for namespace/pxc-operator to be deleted'
waiting for namespace/pxc-operator to be deleted+ set +o xtrace
Error from server (NotFound): namespaces "pxc-operator" not found
+ desc 'create namespace pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.HTXW2EK989
++ mktemp
+ local LAST_ERR=/tmp/tmp.vQ3UwVJuwN
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.HTXW2EK989
namespace/pxc-operator created
+ cat /tmp/tmp.vQ3UwVJuwN
+ rm /tmp/tmp.HTXW2EK989 /tmp/tmp.vQ3UwVJuwN
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.bWUFm79joE
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HG3xEDJ0sy
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.bWUFm79joE
++ cat /tmp/tmp.HG3xEDJ0sy
++ rm /tmp/tmp.bWUFm79joE /tmp/tmp.HG3xEDJ0sy
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2428-b77708dd-1-cluster6 --namespace=pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.2qJvM4fdwp
++ mktemp
+ local LAST_ERR=/tmp/tmp.VCAMqS8Svu
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2428-b77708dd-1-cluster6 --namespace=pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.2qJvM4fdwp
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2428-b77708dd-1-cluster6" modified.
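Note: every "error: resource(s) were provided, but no name was specified" in the chaos-mesh teardown above is expected, not a failure. The cleanup greps for chaos-mesh objects, finds none, and kubectl complains about the empty name list; the following ':' no-op swallows the error so the script can continue under set -e. A minimal sketch of that tolerant-delete pattern (the clusterrole kind is just one of the kinds cleaned above):

    # Collect matching names; the list may be empty on a clean cluster.
    names=$(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}')
    # With an empty $names, kubectl errors out; '|| :' makes that a no-op.
    timeout 30 kubectl delete clusterrole $names || :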
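Every kubectl call in this log runs through the harness's kubectl_bin wrapper, which is why the mktemp / 'seq 0 2' / cat / rm blocks repeat around each command. A sketch of the wrapper as inferred from this trace alone (the real helper lives in the repo's e2e-tests scripts; the retry count, sleep length, and temp-file replay are read off the trace, not copied from source):

    kubectl_bin() {
        local LAST_OUT=$(mktemp)
        local LAST_ERR=$(mktemp)
        local exit_status=0
        # Up to three attempts, matching the '+ for i in $(seq 0 2)' lines above.
        for i in $(seq 0 2); do
            set +e                                   # tolerate a failed attempt
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ $exit_status != 0 ]; then
                sleep 0                              # the trace shows 'sleep 0' between retries
            else
                break
            fi
        done
        cat "$LAST_OUT"                              # replay captured stdout
        cat "$LAST_ERR" >&2                          # replay captured stderr
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }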
+ cat /tmp/tmp.VCAMqS8Svu
+ rm /tmp/tmp.2qJvM4fdwp /tmp/tmp.VCAMqS8Svu
+ return 0
+ deploy_operator
+ desc 'start PXC operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2428/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7YDn3lqKzf
++ mktemp
+ local LAST_ERR=/tmp/tmp.7YpM5kP2EB
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2428/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.7YDn3lqKzf
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
+ cat /tmp/tmp.7YpM5kP2EB
+ rm /tmp/tmp.7YDn3lqKzf /tmp/tmp.7YpM5kP2EB
+ return 0
+ '[' -n pxc-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=pxc-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2428/deploy/cw-rbac.yaml
+ kubectl_bin apply -f -
+ sed -e 's^namespace: .*^namespace: pxc-operator^'
++ mktemp
+ local LAST_OUT=/tmp/tmp.Uem5z7TDVU
++ mktemp
+ local LAST_ERR=/tmp/tmp.X0K1HsAADH
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Uem5z7TDVU
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
+ cat /tmp/tmp.X0K1HsAADH
+ rm /tmp/tmp.Uem5z7TDVU /tmp/tmp.X0K1HsAADH
+ return 0
+ sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2428-b77708dd^'
+ sed -e 's^failureThreshold: .*^failureThreshold: 10^'
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2428/deploy/cw-operator.yaml
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' -
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.zQG1l30KVI
++ mktemp
+ local LAST_ERR=/tmp/tmp.hZS9xde7Bn
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.zQG1l30KVI
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
+ cat /tmp/tmp.hZS9xde7Bn
+ rm /tmp/tmp.zQG1l30KVI /tmp/tmp.hZS9xde7Bn
+ return 0
+ sleep 10
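xtrace interleaves the stages of the pipeline that builds the operator manifest above, which makes it hard to read. Serialized into one command it is roughly the following (same sed and yq expressions and image tag as in the trace; the exact stage order inside the pipeline is an assumption, since xtrace does not record it):

    cat deploy/cw-operator.yaml \
        | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2428-b77708dd^' \
        | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - \
        | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
        | kubectl apply -f -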
+ kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
++ mktemp
+ local LAST_OUT=/tmp/tmp.QPozNe7fo1
++ mktemp
+ local LAST_ERR=/tmp/tmp.pX534KDLz9
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.QPozNe7fo1
pod/percona-xtradb-cluster-operator-6497654795-nw2l2 condition met
+ cat /tmp/tmp.pX534KDLz9
+ rm /tmp/tmp.QPozNe7fo1 /tmp/tmp.pX534KDLz9
+ return 0
++ get_operator_pod
++ local label_prefix=app.kubernetes.io/
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
+++ grep -c percona-xtradb-cluster-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name'
++ head -1
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.cIQI4iSXDm
+++ mktemp
++ local LAST_ERR=/tmp/tmp.BQh2cE5hLu
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.cIQI4iSXDm
++ cat /tmp/tmp.BQh2cE5hLu
++ rm /tmp/tmp.cIQI4iSXDm /tmp/tmp.BQh2cE5hLu
++ return 0
+ wait_pod percona-xtradb-cluster-operator-6497654795-nw2l2 480 pxc-operator
+ local pod=percona-xtradb-cluster-operator-6497654795-nw2l2
+ local max_retry=480
+ local ns=pxc-operator
++ echo percona-xtradb-cluster-operator-6497654795-nw2l2
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ grep -E '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/percona-xtradb-cluster-operator-6497654795-nw2l2 condition met
waiting for pod/percona-xtradb-cluster-operator-6497654795-nw2l2 to become Ready.Ok
+ sleep 3
+ create_namespace users-13453
+ local namespace=users-13453
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces users-13453'
+ xargs kubectl delete ns
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces users-13453
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace users-13453
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.6n5clNLoZ6
+ local LAST_OUT=/tmp/tmp.gBVNTLKrtw
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.zDB3zjFChK
+ local LAST_ERR=/tmp/tmp.ZqNbVSV6n3
+ local exit_status=0
+ local exit_status=0
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace users-13453
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.6n5clNLoZ6
+ cat /tmp/tmp.zDB3zjFChK
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ rm /tmp/tmp.6n5clNLoZ6 /tmp/tmp.zDB3zjFChK
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace users-13453
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace users-13453
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.gBVNTLKrtw
+ cat /tmp/tmp.ZqNbVSV6n3
F0408 19:10:36.422280 5440 cred.go:145] print credential failed with error: Failed to retrieve access token:: failure while executing gcloud, with args [config config-helper --format=json]: signal: terminated (err: )
Unable to connect to the server: getting credentials: exec: executable gke-gcloud-auth-plugin failed with exit code 1
+ rm /tmp/tmp.gBVNTLKrtw /tmp/tmp.ZqNbVSV6n3
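The log ends on an environment failure, not a test assertion: kubectl's GKE credential plugin could not mint a token because the underlying gcloud call was killed ("signal: terminated", most plausibly the job itself being torn down; the cause is an assumption), so all three retries of the 'seq 0 2' loop hit the same auth error. A quick manual check of that auth path, using only standard GKE tooling rather than anything from this harness:

    gke-gcloud-auth-plugin --version           # is the kubectl credential plugin installed?
    gcloud config config-helper --format=json  # the exact gcloud call that was terminated above
    kubectl get ns                             # should succeed again once a token can be minted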