Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/logs/smart-update1-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + API=pxc.percona.com/v9-9-9 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + CLUSTER=smart-update + CLUSTER_SIZE=3 + PROXY_SIZE=2 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 + TARGET_IMAGE_PXC_VS=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + VS_URL=http://version-service + VS_PORT=11000 + VS_ENDPOINT=http://version-service:11000 + main + create_infra smart-update1-3939 + local ns=smart-update1-3939 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n smart-update1-11215 smart-update --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/smart-update patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.z0k7V1gB6O ++ mktemp + local LAST_ERR=/tmp/tmp.Gj9BFm4oqu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.z0k7V1gB6O perconaxtradbcluster.pxc.percona.com "smart-update" deleted from smart-update1-11215 namespace + cat /tmp/tmp.Gj9BFm4oqu + rm /tmp/tmp.z0k7V1gB6O /tmp/tmp.Gj9BFm4oqu + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.s07PbjSuHk ++ mktemp + local LAST_ERR=/tmp/tmp.gzszhBA1Bo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.s07PbjSuHk No resources found + cat /tmp/tmp.gzszhBA1Bo + rm /tmp/tmp.s07PbjSuHk /tmp/tmp.gzszhBA1Bo + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.fubk7FQLBs ++ mktemp + local LAST_ERR=/tmp/tmp.2GDwI0S7k6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fubk7FQLBs No resources found + cat /tmp/tmp.2GDwI0S7k6 + rm /tmp/tmp.fubk7FQLBs /tmp/tmp.2GDwI0S7k6 + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete 
ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' ++ mktemp + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_OUT=/tmp/tmp.9MqVOxxRKc ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.VzGLPeyNun + local LAST_OUT=/tmp/tmp.IVEmyxODPB + local exit_status=0 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + local LAST_ERR=/tmp/tmp.NkbIOAimUG + kubectl get ns + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9MqVOxxRKc + cat /tmp/tmp.VzGLPeyNun + rm /tmp/tmp.9MqVOxxRKc /tmp/tmp.VzGLPeyNun + return 0 namespace "cert-manager" deleted namespace "smart-update1-11215" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IVEmyxODPB namespace "pxc-operator" deleted + cat /tmp/tmp.NkbIOAimUG + rm /tmp/tmp.IVEmyxODPB /tmp/tmp.NkbIOAimUG + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.4Eufdr0lLp ++ mktemp + local LAST_ERR=/tmp/tmp.3jyWcQckCY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4Eufdr0lLp namespace/pxc-operator created + cat /tmp/tmp.3jyWcQckCY + rm /tmp/tmp.4Eufdr0lLp /tmp/tmp.3jyWcQckCY + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.V8dDaMsyOc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Dgul1g6CZ5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.V8dDaMsyOc ++ cat /tmp/tmp.Dgul1g6CZ5 ++ rm /tmp/tmp.V8dDaMsyOc 
/tmp/tmp.Dgul1g6CZ5 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ZRGOXMijxB ++ mktemp + local LAST_ERR=/tmp/tmp.haBbyrVi7N + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZRGOXMijxB Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2" modified. + cat /tmp/tmp.haBbyrVi7N + rm /tmp/tmp.ZRGOXMijxB /tmp/tmp.haBbyrVi7N + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gZgYnrJh3t ++ mktemp + local LAST_ERR=/tmp/tmp.5bqECreBwE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gZgYnrJh3t customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.5bqECreBwE + rm /tmp/tmp.gZgYnrJh3t /tmp/tmp.5bqECreBwE + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.q4WvN7U2kw ++ mktemp + local LAST_ERR=/tmp/tmp.2Q9BOpZfnv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.q4WvN7U2kw clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.2Q9BOpZfnv + rm /tmp/tmp.q4WvN7U2kw /tmp/tmp.2Q9BOpZfnv + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2264-54845288^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/deploy/cw-operator.yaml + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - ++ mktemp + local LAST_OUT=/tmp/tmp.pJ7daYTy7X ++ mktemp + local LAST_ERR=/tmp/tmp.pWRQBOhvjm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pJ7daYTy7X 
deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.pWRQBOhvjm + rm /tmp/tmp.pJ7daYTy7X /tmp/tmp.pWRQBOhvjm + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.iCWFZdPRPv ++ mktemp + local LAST_ERR=/tmp/tmp.vcn1ImV33c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iCWFZdPRPv pod/percona-xtradb-cluster-operator-7cb9576bf9-w9fks condition met + cat /tmp/tmp.vcn1ImV33c + rm /tmp/tmp.iCWFZdPRPv /tmp/tmp.vcn1ImV33c + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.oMsouoOlPp +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2lqBEAEr8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oMsouoOlPp ++ cat /tmp/tmp.p2lqBEAEr8 ++ rm /tmp/tmp.oMsouoOlPp /tmp/tmp.p2lqBEAEr8 ++ return 0 + wait_pod percona-xtradb-cluster-operator-7cb9576bf9-w9fks 480 pxc-operator + local pod=percona-xtradb-cluster-operator-7cb9576bf9-w9fks + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-7cb9576bf9-w9fks ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-7cb9576bf9-w9fks condition met waiting for pod/percona-xtradb-cluster-operator-7cb9576bf9-w9fks to become Ready.Ok + sleep 3 + create_namespace smart-update1-3939 + local namespace=smart-update1-3939 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl 
get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces smart-update1-3939' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces smart-update1-3939 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace smart-update1-3939 + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.om1RvFaIyl + local LAST_OUT=/tmp/tmp.lpHDH3Hmki ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.8K3trFYDrv + local LAST_ERR=/tmp/tmp.lRtOnpMPx5 + local exit_status=0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-3939 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-3939 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lpHDH3Hmki + cat /tmp/tmp.lRtOnpMPx5 + rm /tmp/tmp.lpHDH3Hmki /tmp/tmp.lRtOnpMPx5 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace smart-update1-3939 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.om1RvFaIyl + cat /tmp/tmp.8K3trFYDrv Error from server (NotFound): namespaces "smart-update1-3939" not found + rm /tmp/tmp.om1RvFaIyl /tmp/tmp.8K3trFYDrv + return 1 + : + wait_for_delete namespace/smart-update1-3939 + local res=namespace/smart-update1-3939 + echo -n 'waiting for namespace/smart-update1-3939 to be deleted' waiting for namespace/smart-update1-3939 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "smart-update1-3939" not found + desc 'create namespace smart-update1-3939' + set +o xtrace ----------------------------------------------------------------------------------- create namespace smart-update1-3939 ----------------------------------------------------------------------------------- + kubectl_bin create namespace smart-update1-3939 ++ mktemp + local LAST_OUT=/tmp/tmp.gxXB8QYcWb ++ mktemp + local LAST_ERR=/tmp/tmp.H8QSHD7aTR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace smart-update1-3939 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gxXB8QYcWb namespace/smart-update1-3939 created + cat /tmp/tmp.H8QSHD7aTR + rm /tmp/tmp.gxXB8QYcWb /tmp/tmp.H8QSHD7aTR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.9ts6sdA5yA +++ mktemp ++ local LAST_ERR=/tmp/tmp.NfzV9hGTBK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' 
++ break ++ cat /tmp/tmp.9ts6sdA5yA ++ cat /tmp/tmp.NfzV9hGTBK ++ rm /tmp/tmp.9ts6sdA5yA /tmp/tmp.NfzV9hGTBK ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2 --namespace=smart-update1-3939 ++ mktemp + local LAST_OUT=/tmp/tmp.6RxDTsuVfy ++ mktemp + local LAST_ERR=/tmp/tmp.AKmZAdFTgu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2 --namespace=smart-update1-3939 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6RxDTsuVfy Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2264-54845288-7-cluster2" modified. + cat /tmp/tmp.AKmZAdFTgu + rm /tmp/tmp.6RxDTsuVfy /tmp/tmp.AKmZAdFTgu + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yQdE5GNtAC ++ mktemp + local LAST_ERR=/tmp/tmp.MVkZbyN54g + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yQdE5GNtAC secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.MVkZbyN54g + rm /tmp/tmp.yQdE5GNtAC /tmp/tmp.MVkZbyN54g + return 0 + deploy_version_service + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.RFeUhgYQ3Z ++ mktemp + local LAST_ERR=/tmp/tmp.2DcEplapB3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create configmap versions --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RFeUhgYQ3Z configmap/versions created + cat /tmp/tmp.2DcEplapB3 + rm /tmp/tmp.RFeUhgYQ3Z /tmp/tmp.2DcEplapB3 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BlqxYaNCTb ++ mktemp + local LAST_ERR=/tmp/tmp.IqcdmM65j2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BlqxYaNCTb 
deployment.apps/version-service created service/version-service created + cat /tmp/tmp.IqcdmM65j2 + rm /tmp/tmp.BlqxYaNCTb /tmp/tmp.IqcdmM65j2 + return 0 + sleep 10 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.cYr1w6iXbA ++ mktemp + local LAST_ERR=/tmp/tmp.eLbWPHOhHl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cYr1w6iXbA namespace/cert-manager created + cat /tmp/tmp.eLbWPHOhHl + rm /tmp/tmp.cYr1w6iXbA /tmp/tmp.eLbWPHOhHl + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.dO8VQFgDbr ++ mktemp + local LAST_ERR=/tmp/tmp.A3VyvsZlb4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dO8VQFgDbr namespace/cert-manager labeled + cat /tmp/tmp.A3VyvsZlb4 + rm /tmp/tmp.dO8VQFgDbr /tmp/tmp.A3VyvsZlb4 + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.lYedWhJGmn ++ mktemp + local LAST_ERR=/tmp/tmp.UGHBMVRSqr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lYedWhJGmn namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.UGHBMVRSqr Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
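# Editor's note: every kubectl invocation in this log runs through the test
# suite's kubectl_bin wrapper, which is what produces the recurring
# mktemp / 'seq 0 2' / 'set +e' ... 'set -e' fragments and the
# 'cat /tmp/tmp.*' replays seen throughout. A minimal sketch reconstructed
# from this trace alone; the actual helper in the repo's e2e-tests function
# library may differ in details such as the retry backoff:
kubectl_bin() {
    local LAST_OUT
    local LAST_ERR
    local exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do          # up to three attempts, as in the trace
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 0                  # the trace shows no real pause between tries
        else
            break
        fi
    done
    cat "$LAST_OUT"                  # replay captured stdout into the log
    cat "$LAST_ERR" >&2              # replay captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}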
+ rm /tmp/tmp.lYedWhJGmn /tmp/tmp.UGHBMVRSqr + return 0 + '[' '' == 4.10 ']' + sleep 70 ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ grep :8.0 +++ get_operator_pod +++ local label_prefix=app.kubernetes.io/ ++ tail -n3 ++ head -n1 ++ sort -V ++++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++++ grep -c percona-xtradb-cluster-operator +++ local check_label=1 +++ [[ 1 -eq 0 ]] +++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XMKh3mmCPh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ke10pDRbRp +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.XMKh3mmCPh +++ cat /tmp/tmp.Ke10pDRbRp +++ rm /tmp/tmp.XMKh3mmCPh /tmp/tmp.Ke10pDRbRp +++ return 0 ++ kubectl_bin exec -ti percona-xtradb-cluster-operator-7cb9576bf9-w9fks -n pxc-operator -- curl -s http://version-service.smart-update1-3939.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 +++ mktemp ++ local LAST_OUT=/tmp/tmp.d7l9nCkr95 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4gaSd0jAWT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -ti percona-xtradb-cluster-operator-7cb9576bf9-w9fks -n pxc-operator -- curl -s http://version-service.smart-update1-3939.svc.cluster.local:11000/versions/v1/pxc-operator/9.9.9 ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.d7l9nCkr95 ++ cat /tmp/tmp.4gaSd0jAWT Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.d7l9nCkr95 /tmp/tmp.4gaSd0jAWT ++ return 0 + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.19-10.1 + desc 'patch crd' + set +o xtrace ----------------------------------------------------------------------------------- patch crd ----------------------------------------------------------------------------------- + kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.FKQAPTnSCM ++ mktemp + local LAST_ERR=/tmp/tmp.SS9xsLDL7Q + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch crd perconaxtradbclusters.pxc.percona.com --type=json -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FKQAPTnSCM customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com patched + cat /tmp/tmp.SS9xsLDL7Q + rm /tmp/tmp.FKQAPTnSCM /tmp/tmp.SS9xsLDL7Q + return 0 + desc 'Updating ProxySQL PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- 
Updating ProxySQL PXC cluster ----------------------------------------------------------------------------------- + cp -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/smart-update1/conf/smart-update.yml /tmp/tmp.oFjlHwm1fw/smart-update.yml + yq -i eval '.spec.initContainer.image = "perconalab/percona-xtradb-cluster-operator:PR-2264-54845288"' /tmp/tmp.oFjlHwm1fw/smart-update.yml + spinup_pxc smart-update /tmp/tmp.oFjlHwm1fw/smart-update.yml + local cluster=smart-update + local config=/tmp/tmp.oFjlHwm1fw/smart-update.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.H0SzA1pUOd ++ mktemp + local LAST_ERR=/tmp/tmp.qnPuFWDxiD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H0SzA1pUOd secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.qnPuFWDxiD + rm /tmp/tmp.H0SzA1pUOd /tmp/tmp.qnPuFWDxiD + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2264/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' + local LAST_OUT=/tmp/tmp.kHIgN6s9AI + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update1-3939~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2264-54845288#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.196GSXVLFX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kHIgN6s9AI deployment.apps/pxc-client created + cat /tmp/tmp.196GSXVLFX + rm /tmp/tmp.kHIgN6s9AI /tmp/tmp.196GSXVLFX + return 0 + [[ percona/percona-xtradb-cluster:8.0.19-10.1 =~ 5\.7 ]] + apply_config /tmp/tmp.oFjlHwm1fw/smart-update.yml + '[' -z '' ']' + cat_config /tmp/tmp.oFjlHwm1fw/smart-update.yml + 
kubectl_bin apply -f - + cat /tmp/tmp.oFjlHwm1fw/smart-update.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v9-9-9#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_OUT=/tmp/tmp.fZLX02xqDl + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.19-10.1#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.smart-update1-3939~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + local LAST_ERR=/tmp/tmp.DDmOzIrpBS + local exit_status=0 + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2264-54845288#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fZLX02xqDl perconaxtradbcluster.pxc.percona.com/smart-update created + cat /tmp/tmp.DDmOzIrpBS + rm /tmp/tmp.fZLX02xqDl /tmp/tmp.DDmOzIrpBS + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy smart-update ++ local target_cluster=smart-update +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wc7exw8eeA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SV4xPrB8aH +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.wc7exw8eeA +++ cat /tmp/tmp.SV4xPrB8aH +++ rm /tmp/tmp.wc7exw8eeA /tmp/tmp.SV4xPrB8aH +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0w7L36EO9U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FfrNubBeS6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc smart-update -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.0w7L36EO9U +++ cat /tmp/tmp.FfrNubBeS6 +++ rm /tmp/tmp.0w7L36EO9U /tmp/tmp.FfrNubBeS6 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo smart-update-proxysql ++ return + local proxy=smart-update-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-3939 ++ mktemp + local LAST_OUT=/tmp/tmp.kofA7dDl3j ++ mktemp + local LAST_ERR=/tmp/tmp.wL4xz0biaQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-3939 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i 
in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-3939 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n smart-update1-3939 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.kofA7dDl3j + cat /tmp/tmp.wL4xz0biaQ error: no matching resources found + rm /tmp/tmp.kofA7dDl3j /tmp/tmp.wL4xz0biaQ + return 1 + true + wait_for_running smart-update-proxysql 1 + local name=smart-update-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-proxysql-0 480 + local pod=smart-update-proxysql-0 + local max_retry=480 + local ns= ++ echo smart-update-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace Error from server (NotFound): pods "smart-update-proxysql-0" not found waiting for pod/smart-update-proxysql-0 to become Ready..............Ok + wait_for_running smart-update-pxc 3 + local name=smart-update-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod smart-update-pxc-0 480 + local pod=smart-update-pxc-0 + local max_retry=480 + local ns= ++ echo smart-update-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/smart-update-pxc-0 condition met waiting for pod/smart-update-pxc-0 to become Ready.full cluster crash detected
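# Editor's note: wait_pod switches tracing off ('set +o xtrace'), so only its
# side effects are visible above: the one-shot 'kubectl wait' result
# ("condition met" or a NotFound error), the "waiting for pod/... to become
# Ready" banner, one dot per poll, and "Ok" on success; on failure paths the
# suite prints a diagnostic instead, such as the "full cluster crash detected"
# message that closes this section. A hypothetical reconstruction of the
# hidden polling loop, inferred only from that output (not the suite's exact
# code; the probe command, function name, and sleep interval are assumptions):
wait_pod_sketch() {
    local pod=$1
    local max_retry=${2:-480}
    local ns=$3
    local retry=0
    # First try mirrors the one-shot "condition met" / NotFound lines above.
    kubectl wait --for=condition=Ready "pod/${pod}" ${ns:+-n "$ns"} --timeout=30s || true
    echo -n "waiting for pod/${pod} to become Ready"
    until [ "$(kubectl get "pod/${pod}" ${ns:+-n "$ns"} \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null)" = "True" ]; do
        echo -n .
        retry=$((retry + 1))
        if [ "$retry" -ge "$max_retry" ]; then
            echo " pod/${pod} did not become Ready after ${max_retry} tries"
            return 1
        fi
        sleep 1
    done
    echo .Ok
}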