++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/smart-update.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/smart-update.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ jq -r .serverVersion.gitVersion +++ kubectl version -o json +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.15.1 ++ '[' v3 == v2 ']' + main + create_namespace smart-update-11317 + local namespace=smart-update-11317 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.5ZYYpbvd6S + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.TarJdDTWH7 ++ mktemp + local LAST_ERR=/tmp/tmp.s7VXqtRh4i + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + awk '{print$1}' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + local LAST_ERR=/tmp/tmp.ZtZHgvEBWS + local exit_status=0 + local timeout=4 + set +o errexit + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TarJdDTWH7 + cat /tmp/tmp.s7VXqtRh4i + rm /tmp/tmp.TarJdDTWH7 /tmp/tmp.s7VXqtRh4i + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.5ZYYpbvd6S + cat /tmp/tmp.ZtZHgvEBWS Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.5ZYYpbvd6S /tmp/tmp.ZtZHgvEBWS + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.DnRXCQJlLP ++ mktemp + local LAST_ERR=/tmp/tmp.4VVsHFke0v + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + 
for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.DnRXCQJlLP + cat /tmp/tmp.4VVsHFke0v Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.DnRXCQJlLP /tmp/tmp.4VVsHFke0v + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace smart-update-11317 ++ mktemp + local LAST_OUT=/tmp/tmp.GPoxGvnWpt ++ mktemp + local LAST_ERR=/tmp/tmp.dE2KBCpmJr + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete namespace smart-update-11317 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete namespace smart-update-11317 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete namespace smart-update-11317 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.GPoxGvnWpt + cat /tmp/tmp.dE2KBCpmJr Error from server (NotFound): namespaces "smart-update-11317" not found + rm /tmp/tmp.GPoxGvnWpt /tmp/tmp.dE2KBCpmJr + return 1 + : + wait_for_delete namespace/smart-update-11317 + local res=namespace/smart-update-11317 + set +o xtrace namespace/smart-update-11317 - Error from server (NotFound): namespaces "smart-update-11317" not found + kubectl_bin create namespace smart-update-11317 ++ mktemp + local LAST_OUT=/tmp/tmp.AfHrE2wFoF ++ mktemp + local LAST_ERR=/tmp/tmp.IJEKZWRB8k + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl create namespace smart-update-11317 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.AfHrE2wFoF namespace/smart-update-11317 created + cat 
/tmp/tmp.IJEKZWRB8k + rm /tmp/tmp.AfHrE2wFoF /tmp/tmp.IJEKZWRB8k + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.hU7NS2I9Cq +++ mktemp ++ local LAST_ERR=/tmp/tmp.1VUrNibL5h ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.hU7NS2I9Cq ++ cat /tmp/tmp.1VUrNibL5h ++ rm /tmp/tmp.hU7NS2I9Cq /tmp/tmp.1VUrNibL5h ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-upgrade --namespace=smart-update-11317 ++ mktemp + local LAST_OUT=/tmp/tmp.RObrH9UMZz ++ mktemp + local LAST_ERR=/tmp/tmp.K3MIpcIL1W + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-upgrade --namespace=smart-update-11317 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.RObrH9UMZz Context "gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-upgrade" modified. + cat /tmp/tmp.K3MIpcIL1W + rm /tmp/tmp.RObrH9UMZz /tmp/tmp.K3MIpcIL1W + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=smart-update-11317 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml disabled install smart-update-11317 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=smart-update-11317 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "smart-update-11317"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - + /usr/bin/sed -e 's#^namespace: .*#namespace: "smart-update-11317"#g' ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 ++ cut -d: -f1 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-776-0087ef129"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml metadata.namespace smart-update-11317 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-776-0087ef129-pgo-deployer + yq w -d3 - 'subjects[0].namespace' smart-update-11317 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.AQPkEqWGwq/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" 
archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "smart-update-11317" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-776-0087ef129" pgo_operator_namespace: "smart-update-11317"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.AQPkEqWGwq/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.h86Bd2SzW0 ++ mktemp + local LAST_ERR=/tmp/tmp.SNLx69RMzy + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl apply -f /tmp/tmp.AQPkEqWGwq/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.h86Bd2SzW0 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.SNLx69RMzy + rm /tmp/tmp.h86Bd2SzW0 /tmp/tmp.SNLx69RMzy + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=smart-update-11317 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ grep -io 'not found' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gwDrxY6SRh ++ mktemp + local LAST_ERR=/tmp/tmp.X0NaY15Drf + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.gwDrxY6SRh serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.X0NaY15Drf + rm /tmp/tmp.gwDrxY6SRh /tmp/tmp.X0NaY15Drf + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZsrQnZ0diU +++ mktemp ++ local LAST_ERR=/tmp/tmp.2yUlwMFM8Z ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ZsrQnZ0diU ++ cat /tmp/tmp.2yUlwMFM8Z ++ rm /tmp/tmp.ZsrQnZ0diU /tmp/tmp.2yUlwMFM8Z ++ return 0 + wait_pod postgres-operator-575c4cf5b4-h86bl + local pod=postgres-operator-575c4cf5b4-h86bl + local ns= + set +o xtrace postgres-operator-575c4cf5b4-h86bltrue + [[ -n '' ]] + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + cluster=cluster1 + PGO_TAG=1.0.0 + PG_VER=13 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Ry17E0jq5v ++ mktemp + local LAST_ERR=/tmp/tmp.b82Em5xcPD + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Ry17E0jq5v deployment.apps/pg-client created + cat /tmp/tmp.b82Em5xcPD + rm /tmp/tmp.Ry17E0jq5v /tmp/tmp.b82Em5xcPD + return 0 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.0.0-ppg13-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.0.0-ppg13-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.0.0-ppg13-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/1.0.0_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/1.0.0_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local 
pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + local namespace=smart-update-11317 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 smart-update-11317 + local cluster=cluster1 + local namespace=smart-update-11317 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + yq w - metadata.name cluster1-users-secret + kubectl_bin -n smart-update-11317 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZEosFxxZC8 ++ mktemp + local LAST_ERR=/tmp/tmp.hCqwpcREmc + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZEosFxxZC8 secret/cluster1-users-secret created + cat /tmp/tmp.hCqwpcREmc + rm /tmp/tmp.ZEosFxxZC8 /tmp/tmp.hCqwpcREmc + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml + kubectl_bin -n smart-update-11317 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.hDOfo60R17 ++ mktemp + yq w - metadata.name cluster1-pmm-secret + local LAST_ERR=/tmp/tmp.7dD3BgPAIm + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hDOfo60R17 secret/cluster1-pmm-secret created + cat /tmp/tmp.7dD3BgPAIm + rm /tmp/tmp.hDOfo60R17 /tmp/tmp.7dD3BgPAIm + return 0 + kubectl_bin -n smart-update-11317 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.nOIT6m4MMc ++ mktemp + local LAST_ERR=/tmp/tmp.TmHIWv7MkY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nOIT6m4MMc deployment.apps/pg-client unchanged + cat /tmp/tmp.TmHIWv7MkY + rm /tmp/tmp.nOIT6m4MMc /tmp/tmp.TmHIWv7MkY + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/1.0.0_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/1.0.0_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.name cluster1 + yq w - spec.database cluster1 + yq w - spec.name cluster1 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest-repo + yq w - spec.backup.image percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest + yq w - spec.keepData false + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.0.0-ppg13-postgres-ha + yq w - spec.secretsName cluster1-users-secret + yq w - metadata.name cluster1 + yq w - 
metadata.labels.pg-cluster cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - spec.clustername cluster1 + yq w - spec.keepBackups false + yq w - spec.user some-name + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/1.0.0_cr.yaml metadata.annotations.current-primary cluster1 + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.0.0-ppg13-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.2 + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.0.0-ppg13-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.AQPkEqWGwq/cr.yaml -n smart-update-11317 ++ mktemp + local LAST_OUT=/tmp/tmp.GkubFr2KMv ++ mktemp + local LAST_ERR=/tmp/tmp.2yruAjri3o + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl apply -f /tmp/tmp.AQPkEqWGwq/cr.yaml -n smart-update-11317 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.GkubFr2KMv perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.2yruAjri3o + rm /tmp/tmp.GkubFr2KMv /tmp/tmp.2yruAjri3o + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=smart-update-11317 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo.. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=smart-update-11317 + sleep 10 + set +o xtrace cluster1............. + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=smart-update-11317 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 6 -ge 120 ]] + [[ 6 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 7 -ge 120 ]] + [[ 7 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 8 -ge 120 ]] + [[ 8 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 9 -ge 120 ]] + [[ 9 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 10 -ge 120 ]] + [[ 10 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 11 -ge 120 ]] + [[ 11 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 12 -ge 120 ]] + [[ 12 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 13 -ge 120 ]] + [[ 13 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 14 -ge 120 ]] + [[ 14 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 15 -ge 120 ]] + [[ 15 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 16 -ge 120 ]] + [[ 16 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 17 -ge 120 ]] + [[ 17 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 18 -ge 120 ]] + [[ 18 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 19 -ge 120 ]] + [[ 19 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 20 -ge 120 ]] + [[ 20 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 21 -ge 120 ]] + [[ 21 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 22 -ge 120 ]] + [[ 22 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 23 -ge 120 ]] + [[ 23 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 24 -ge 120 ]] + [[ 24 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 25 -ge 120 ]] + [[ 25 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 26 -ge 120 ]] + [[ 26 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 27 -ge 120 ]] + [[ 27 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 28 -ge 120 ]] + [[ 28 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 29 -ge 120 ]] + [[ 29 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 30 -ge 120 ]] + [[ 30 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 31 -ge 120 ]] + [[ 31 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 32 -ge 120 ]] + [[ 32 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 33 -ge 120 ]] + [[ 33 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 34 -ge 120 ]] + [[ 34 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 35 -ge 120 ]] + [[ 35 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 36 -ge 120 ]] + [[ 36 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 37 -ge 120 ]] + [[ 37 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 38 -ge 120 ]] + [[ 38 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 39 -ge 120 ]] + [[ 39 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 40 -ge 120 ]] + [[ 40 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 41 -ge 120 ]] + [[ 41 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 42 -ge 120 ]] + [[ 42 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 43 -ge 120 ]] + [[ 43 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 44 -ge 120 ]] + [[ 44 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 45 -ge 120 ]] + [[ 45 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 46 -ge 120 ]] + [[ 46 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 47 -ge 120 ]] + [[ 47 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 48 -ge 120 ]] + [[ 48 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 49 -ge 120 ]] + [[ 49 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 50 -ge 120 ]] + [[ 50 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 51 -ge 120 ]] + [[ 51 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 52 -ge 120 ]] + [[ 52 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 53 -ge 120 ]] + [[ 53 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 54 -ge 120 ]] + [[ 54 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 55 -ge 120 ]] + [[ 55 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 56 -ge 120 ]] + [[ 56 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 57 -ge 120 ]] + [[ 57 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 58 -ge 120 ]] + [[ 58 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 59 -ge 120 ]] + [[ 59 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 60 -ge 120 ]] + [[ 60 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 61 -ge 120 ]] + [[ 61 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 62 -ge 120 ]] + [[ 62 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 63 -ge 120 ]] + [[ 63 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 64 -ge 120 ]] + [[ 64 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 65 -ge 120 ]] + [[ 65 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 66 -ge 120 ]] + [[ 66 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 67 -ge 120 ]] + [[ 67 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 68 -ge 120 ]] + [[ 68 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 69 -ge 120 ]] + [[ 69 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 70 -ge 120 ]] + [[ 70 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 71 -ge 120 ]] + [[ 71 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 72 -ge 120 ]] + [[ 72 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 73 -ge 120 ]] + [[ 73 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 74 -ge 120 ]] + [[ 74 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 75 -ge 120 ]] + [[ 75 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 76 -ge 120 ]] + [[ 76 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 77 -ge 120 ]] + [[ 77 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 78 -ge 120 ]] + [[ 78 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 79 -ge 120 ]] + [[ 79 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 80 -ge 120 ]] + [[ 80 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 81 -ge 120 ]] + [[ 81 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 82 -ge 120 ]] + [[ 82 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 83 -ge 120 ]] + [[ 83 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 84 -ge 120 ]] + [[ 84 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 85 -ge 120 ]] + [[ 85 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 86 -ge 120 ]] + [[ 86 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 87 -ge 120 ]] + [[ 87 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 88 -ge 120 ]] + [[ 88 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 89 -ge 120 ]] + [[ 89 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 90 -ge 120 ]] + [[ 90 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 91 -ge 120 ]] + [[ 91 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 92 -ge 120 ]] + [[ 92 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 93 -ge 120 ]] + [[ 93 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 94 -ge 120 ]] + [[ 94 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 95 -ge 120 ]] + [[ 95 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 96 -ge 120 ]] + [[ 96 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 97 -ge 120 ]] + [[ 97 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 98 -ge 120 ]] + [[ 98 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 99 -ge 120 ]] + [[ 99 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 100 -ge 120 ]] + [[ 100 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 101 -ge 120 ]] + [[ 101 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 102 -ge 120 ]] + [[ 102 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 103 -ge 120 ]] + [[ 103 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 104 -ge 120 ]] + [[ 104 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 105 -ge 120 ]] + [[ 105 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 106 -ge 120 ]] + [[ 106 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 107 -ge 120 ]] + [[ 107 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 108 -ge 120 ]] + [[ 108 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 109 -ge 120 ]] + [[ 109 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 110 -ge 120 ]] + [[ 110 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 111 -ge 120 ]] + [[ 111 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 112 -ge 120 ]] + [[ 112 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 113 -ge 120 ]] + [[ 113 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 114 -ge 120 ]] + [[ 114 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 115 -ge 120 ]] + [[ 115 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 116 -ge 120 ]] + [[ 116 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 117 -ge 120 ]] + [[ 117 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 118 -ge 120 ]] + [[ 118 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 119 -ge 120 ]] + [[ 119 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. + set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=smart-update-11317 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=smart-update-11317 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YxMZjosZJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.9v4riFLaDa ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.YxMZjosZJz ++ cat /tmp/tmp.9v4riFLaDa ++ rm /tmp/tmp.YxMZjosZJz /tmp/tmp.9v4riFLaDa ++ return 0 + status_query='kubectl_bin -n smart-update-11317 get pgcluster/cluster1 -o jsonpath='\''{.status.state}'\''' + local retry=0 ++ eval kubectl_bin -n smart-update-11317 get pgcluster/cluster1 -o 'jsonpath='\''{.status.state}'\''' +++ kubectl_bin -n smart-update-11317 get pgcluster/cluster1 -o 'jsonpath={.status.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nNNOqzVd8q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bgQqHa3EiO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get pgcluster/cluster1 -o 'jsonpath={.status.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.nNNOqzVd8q +++ cat /tmp/tmp.bgQqHa3EiO +++ rm /tmp/tmp.nNNOqzVd8q /tmp/tmp.bgQqHa3EiO +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=smart-update-11317 +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UfuQkTHJER ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oXZU6lL200 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o 
errexit +++ cat /tmp/tmp.UfuQkTHJER +++ cat /tmp/tmp.oXZU6lL200 +++ rm /tmp/tmp.UfuQkTHJER /tmp/tmp.oXZU6lL200 +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NplvnFxHjm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GzjZowXV7p +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.NplvnFxHjm +++ cat /tmp/tmp.GzjZowXV7p +++ rm /tmp/tmp.NplvnFxHjm /tmp/tmp.GzjZowXV7p +++ return 0 +++ kubectl_bin -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ base64 -d ++++ mktemp +++ local LAST_OUT=/tmp/tmp.y9UMBAxyCr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GR7nuhuJEP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.y9UMBAxyCr +++ cat /tmp/tmp.GR7nuhuJEP +++ rm /tmp/tmp.y9UMBAxyCr /tmp/tmp.GR7nuhuJEP +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.smart-update-11317 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.smart-update-11317 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.znJM8RReuE +++ mktemp ++ local LAST_ERR=/tmp/tmp.gfYeHl3hIN ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.znJM8RReuE ++ cat /tmp/tmp.gfYeHl3hIN ++ rm /tmp/tmp.znJM8RReuE /tmp/tmp.gfYeHl3hIN ++ return 0 + local client_container=pg-client-76df9df876-5hkqg + kubectl_bin exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.w5XpFompvS ++ mktemp + local LAST_ERR=/tmp/tmp.sGLBaZEQ3w + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.w5XpFompvS 
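[Readability note] The run_psql expansion traced above boils down to a small helper that looks up the pg-client pod by label and pipes the SQL statement into psql over the cluster service DNS name. The sketch below is inferred from this trace only; the namespace handling and argument defaults are assumptions, and the actual helper in the e2e-tests function library may differ.

# approximate reconstruction of run_psql, inferred from the xtrace output above
run_psql() {
    local command="$1"
    local uri="$2"
    local driver="${3:-postgres}"
    local suffix="${4:-.svc.cluster.local}"
    local client_container
    # the trace resolves the client pod via its name=pg-client label;
    # $namespace is assumed to come from the surrounding test context (here smart-update-11317)
    client_container=$(kubectl_bin -n "$namespace" get pods --selector=name=pg-client \
        -o 'jsonpath={.items[].metadata.name}')
    # feed the SQL to psql inside the client pod; ON_ERROR_STOP makes psql fail on the first error
    kubectl_bin exec "$client_container" -n "$namespace" -- bash -c \
        "printf '$command\n' | psql -v ON_ERROR_STOP=1 -t -q $driver://$uri$suffix"
}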
+ cat /tmp/tmp.sGLBaZEQ3w + rm /tmp/tmp.w5XpFompvS /tmp/tmp.sGLBaZEQ3w + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=smart-update-11317 +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RPNmGNHYVX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7e27mZJlEG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.RPNmGNHYVX +++ cat /tmp/tmp.7e27mZJlEG +++ rm /tmp/tmp.RPNmGNHYVX /tmp/tmp.7e27mZJlEG +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R6Sfmdag6P ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MKVJ2YUX6N +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.R6Sfmdag6P +++ cat /tmp/tmp.MKVJ2YUX6N +++ rm /tmp/tmp.R6Sfmdag6P /tmp/tmp.MKVJ2YUX6N +++ return 0 +++ kubectl_bin -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ base64 -d ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HakaB6nfJu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QdLkH3mCWz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.HakaB6nfJu +++ cat /tmp/tmp.QdLkH3mCWz +++ rm /tmp/tmp.HakaB6nfJu /tmp/tmp.QdLkH3mCWz +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.smart-update-11317 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.smart-update-11317 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gJWXiMBW2n +++ mktemp ++ local LAST_ERR=/tmp/tmp.ReHK24Zp0B ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.gJWXiMBW2n ++ cat /tmp/tmp.ReHK24Zp0B ++ rm /tmp/tmp.gJWXiMBW2n /tmp/tmp.ReHK24Zp0B ++ return 0 + local client_container=pg-client-76df9df876-5hkqg + kubectl_bin exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' ++ mktemp + local 
LAST_OUT=/tmp/tmp.bTELidhusS ++ mktemp + local LAST_ERR=/tmp/tmp.8CTEnyjnhK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.bTELidhusS + cat /tmp/tmp.8CTEnyjnhK + rm /tmp/tmp.bTELidhusS /tmp/tmp.8CTEnyjnhK + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=smart-update-11317 +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QzJOA8aywZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Gztl3KtimX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QzJOA8aywZ +++ cat /tmp/tmp.Gztl3KtimX +++ rm /tmp/tmp.QzJOA8aywZ /tmp/tmp.Gztl3KtimX +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OHTjwillyi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BxXoYMOmYH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OHTjwillyi +++ cat /tmp/tmp.BxXoYMOmYH +++ rm /tmp/tmp.OHTjwillyi /tmp/tmp.BxXoYMOmYH +++ return 0 +++ kubectl_bin -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ base64 -d ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IkdmK7hgKP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A55p5wGs75 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get secret/cluster1-postgres-secret -o 'jsonpath={.data.password}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IkdmK7hgKP +++ cat /tmp/tmp.A55p5wGs75 +++ rm /tmp/tmp.IkdmK7hgKP /tmp/tmp.A55p5wGs75 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.smart-update-11317 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.smart-update-11317 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X4M7KsEct5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FpSYPEHHgS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ 
KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.X4M7KsEct5 ++ cat /tmp/tmp.FpSYPEHHgS ++ rm /tmp/tmp.X4M7KsEct5 /tmp/tmp.FpSYPEHHgS ++ return 0 + local client_container=pg-client-76df9df876-5hkqg + kubectl_bin exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.iUiEcazJ9o ++ mktemp + local LAST_ERR=/tmp/tmp.X40X3Xd3VB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.smart-update-11317.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.iUiEcazJ9o + cat /tmp/tmp.X40X3Xd3VB + rm /tmp/tmp.iUiEcazJ9o /tmp/tmp.X40X3Xd3VB + return 0 + sleep 10 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=smart-update-11317 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + sleep 30 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/main_cr.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qFRRY2ddfH ++ mktemp + local LAST_ERR=/tmp/tmp.HXeqSCC9RU + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/conf/main_cr.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qFRRY2ddfH perconapgcluster.pg.percona.com/cluster1 configured + cat /tmp/tmp.HXeqSCC9RU + rm /tmp/tmp.qFRRY2ddfH /tmp/tmp.HXeqSCC9RU + return 0 + sleep 60 + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=smart-update-11317 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1QHb25maPc +++ mktemp ++ local LAST_ERR=/tmp/tmp.FSEtDMg2WE ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set 
-o errexit ++ cat /tmp/tmp.1QHb25maPc ++ cat /tmp/tmp.FSEtDMg2WE ++ rm /tmp/tmp.1QHb25maPc /tmp/tmp.FSEtDMg2WE ++ return 0 + status_query='kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xrYPFOLXpA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.y5djgKozZO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.xrYPFOLXpA +++ cat /tmp/tmp.y5djgKozZO +++ rm /tmp/tmp.xrYPFOLXpA /tmp/tmp.y5djgKozZO +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 ++ get_psql_user_pass pgbouncer cluster1 ++ local user=pgbouncer ++ local cluster=cluster1 ++ local namespace=smart-update-11317 +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cQvPjGKWBx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.alM6F3bPo2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.cQvPjGKWBx +++ cat /tmp/tmp.alM6F3bPo2 +++ rm /tmp/tmp.cQvPjGKWBx /tmp/tmp.alM6F3bPo2 +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wacp9PTdbW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rVqXcuc3Vm +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wacp9PTdbW +++ cat /tmp/tmp.rVqXcuc3Vm +++ rm /tmp/tmp.wacp9PTdbW /tmp/tmp.rVqXcuc3Vm +++ return 0 +++ kubectl_bin -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EkuY2rjJtD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DkLytFKSiC +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.EkuY2rjJtD +++ cat /tmp/tmp.DkLytFKSiC +++ rm /tmp/tmp.EkuY2rjJtD /tmp/tmp.DkLytFKSiC +++ return 0 ++ [[ x == \x ]] ++ : assuming user secret was created by default +++ kubectl_bin -n smart-update-11317 get secret/cluster1-users-secret -o 'jsonpath={.data.pgbouncer}' +++ base64 -d ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FbjAMEi3J1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IINbLaEEeW +++ local 
exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade +++ kubectl -n smart-update-11317 get secret/cluster1-users-secret -o 'jsonpath={.data.pgbouncer}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.FbjAMEi3J1 +++ cat /tmp/tmp.IINbLaEEeW +++ rm /tmp/tmp.FbjAMEi3J1 /tmp/tmp.IINbLaEEeW +++ return 0 ++ echo pgbouncer_password + compare_psql_cmd show-1 'SHOW DATABASES' pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317 + local command_id=show-1 + local 'command=SHOW DATABASES' + local uri=pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317 + local postfix= + local uri_suffix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/compare/show-1.sql + run_psql 'SHOW DATABASES' pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317 postgres '' + local 'command=SHOW DATABASES' + local uri=pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2T0I48uNJO +++ mktemp ++ local LAST_ERR=/tmp/tmp.Dq8EPBdgqH ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl -n smart-update-11317 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.2T0I48uNJO ++ cat /tmp/tmp.Dq8EPBdgqH ++ rm /tmp/tmp.2T0I48uNJO /tmp/tmp.Dq8EPBdgqH ++ return 0 + local client_container=pg-client-76df9df876-5hkqg + kubectl_bin exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''SHOW DATABASES\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.MCFd3uf3lV ++ mktemp + local LAST_ERR=/tmp/tmp.xjFex6SgPG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl exec pg-client-76df9df876-5hkqg -n smart-update-11317 -- bash -c 'printf '\''SHOW DATABASES\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://pgbouncer:pgbouncer_password@cluster1-pgbouncer.smart-update-11317.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.MCFd3uf3lV + cat /tmp/tmp.xjFex6SgPG + rm /tmp/tmp.MCFd3uf3lV /tmp/tmp.xjFex6SgPG + return 0 + '[' '!' 
-s /tmp/tmp.AQPkEqWGwq/show-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/smart-update/compare/show-1.sql /tmp/tmp.AQPkEqWGwq/show-1.sql + destroy smart-update-11317 + grep -v level=info + grep -v level=debug ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + grep -v 'Getting pods from source' + grep -v 'Getting tasks for pod' + sort -u + grep -v 'get backup status: Job.batch' + grep -v 'the object has been modified' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' +++ mktemp + tee /tmp/tmp.AQPkEqWGwq/operator.log ++ local LAST_OUT=/tmp/tmp.VzawhUHxRo +++ mktemp ++ local LAST_ERR=/tmp/tmp.ichVur7ot5 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.VzawhUHxRo ++ cat /tmp/tmp.ichVur7ot5 ++ rm /tmp/tmp.VzawhUHxRo /tmp/tmp.ichVur7ot5 ++ return 0 + kubectl_bin logs postgres-operator-575c4cf5b4-h86bl -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.zvSMeXj3PU ++ mktemp + local LAST_ERR=/tmp/tmp.gxEDo1sLAG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl logs postgres-operator-575c4cf5b4-h86bl -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zvSMeXj3PU + cat /tmp/tmp.gxEDo1sLAG + rm /tmp/tmp.zvSMeXj3PU /tmp/tmp.gxEDo1sLAG + return 0 ] { } }, ] } }, { } }, { } ] } } ] } { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ,{ ], { } }, }], { } }, }], } ], { } }, } ] ], } } ], }, }, { } }, }, { } }, }, 2024/05/24 18:23:48 INF 1 (localhost:4150) connecting to nsqd 2024/05/24 18:24:48 INF 2 (localhost:4150) connecting to nsqd 2024/05/24 18:24:49 INF 3 (localhost:4150) connecting to nsqd 2024/05/24 18:25:05 INF 4 (localhost:4150) connecting to nsqd 2024/05/24 18:25:40 INF 5 (localhost:4150) connecting to nsqd 2024/05/24 18:25:40 INF 6 (localhost:4150) connecting to nsqd 2024/05/24 18:25:40 INF 7 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "cluster1-pgbr-repo" "cluster1" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 10000, "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "2", "cpu": "500m", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgha-scope": "cluster1","pgo-version": "1.0.0","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin" "defaultMode": 288 "defaultMode": 420, "deployment-name": "cluster1" "deployment-name": "cluster1-repl1" "deployment-name": "cluster1-repl2" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "env": [ { "exec": { "fieldPath": "metadata.annotations" 
"fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0524 18:23:23.172663 1 shared_informer.go:240] Waiting for caches to sync for namespace I0524 18:23:23.273145 1 shared_informer.go:247] Caches are synced for namespace I0524 18:23:23.388004 1 shared_informer.go:240] Waiting for caches to sync for smart-update-11317 I0524 18:23:23.489195 1 shared_informer.go:247] Caches are synced for smart-update-11317 "image": "percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest", "image": "percona/percona-postgresql-operator:1.0.0-ppg13-pgbackrest-repo", "image": "percona/percona-postgresql-operator:1.0.0-ppg13-pgbadger", "image": "percona/percona-postgresql-operator:1.0.0-ppg13-pgbouncer", "image": "percona/percona-postgresql-operator:1.0.0-ppg13-postgres-ha", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 20, "initialDelaySeconds": 30, "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "limits": { "limits": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "48Mi" "memory": "512Mi" "memory": "64Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgdata", "mountPath": "/pgdata", "mountPath": "/report", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "mountPath": "/tmp", "name": "backrest", "name": "backrest-backup-cluster1", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "BADGER_TARGET", "name": "cluster1", "name": "cluster1", "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo" "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer-cm" "name": "cluster1-pgbouncer-secret", "name": "cluster1-pgbouncer-secret", "name": "cluster1-pgbr-repo", "name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.0.0" "name": "cluster1-pgha-config", "name": "cluster1-pgha-config", "name": "cluster1-repl1", "name": "cluster1-repl1", "name": "cluster1-repl2", "name": "cluster1-repl2", "name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1","pgo-version": "1.0.0" "name": "cluster1-replica","pg-cluster": 
"cluster1","deployment-name": "cluster1-repl2","pgo-version": "1.0.0" "name": "cluster1-stanza-create", "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbadger" "name": "pgbadger", "name": "PGBADGER_SERVICE_PORT", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "report", "name": "report", "name": "root-volume" "name": "root-volume", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "tmp" "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "periodSeconds": 10 "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"cluster1"} "persistentVolumeClaim":{"claimName":"cluster1-repl1"} "persistentVolumeClaim":{"claimName":"cluster1-repl2"} "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", 
"pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgo-version": "1.0.0","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1" "pgo-version": "1.0.0","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2" "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 10000 "port": 2022, "ports": [{ "ports": [{ "ports": [ { "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "privileged": false, "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "cluster1-backrest-repo-config" "secretName": "cluster1-backrest-repo-config", "secretName": "cluster1-backrest-repo-config" "secretName": "cluster1-backrest-repo-config", "secretName": "cluster1-postgres-secret" "secretName": "cluster1-primaryuser-secret" "secretName": "cluster1-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": { "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "service-name": "cluster1-backrest-shared-repo", "service-name": "cluster1-backrest-shared-repo", "service-name": "cluster1-pgbouncer", "service-name": "cluster1-pgbouncer", "service-name": "cluster1-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1G" "strategy": { "targetPort": 2022, "tcpSocket": { "template": { time="2024-05-24T18:23:22Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.7.0 time="2024-05-24T18:23:52Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.7.0 time="2024-05-24T18:23:57Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" 
version=1.7.0 time="2024-05-24T18:24:48Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.7.0 time="2024-05-24T18:24:48Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.7.0 time="2024-05-24T18:25:40Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T18:25:41Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T18:25:41Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T18:30:31Z" level=error msg="update perconapgcluster: handle internal secrets: get secret cluster1-users: secrets \"cluster1-users\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:481" version=1.7.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "10000" "value": "5432" "value": "5432" "value": "/backrestrepo/cluster1-backrest-shared-repo" "value": "/backrestrepo/cluster1-backrest-shared-repo" "value": "backup" "value": "cluster1" "value": "cluster1" "value": "cluster1-backrest-shared-repo" "value": "cluster1-backrest-shared-repo-d7c658b7-wv2xb" "value": "cluster1-repl1" "value": "cluster1-repl2" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.101.82.20 --db-path=/pgdata/cluster1" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/cluster1" "value": "/pgdata/cluster1" "value": "/pgdata/cluster1-repl1" "value": "/pgdata/cluster1-repl1" "value": "/pgdata/cluster1-repl2" "value": "/pgdata/cluster1-repl2" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.101.82.20 --db-path=/pgdata/cluster1" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", 
"vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumeMounts": [ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4Ak4AYnVsz ++ mktemp + local LAST_ERR=/tmp/tmp.M9TXqcrq1R + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete -f /tmp/tmp.AQPkEqWGwq/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.4Ak4AYnVsz + cat /tmp/tmp.M9TXqcrq1R Error from server (NotFound): error when deleting "/tmp/tmp.AQPkEqWGwq/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.AQPkEqWGwq/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.AQPkEqWGwq/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.AQPkEqWGwq/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.AQPkEqWGwq/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.4Ak4AYnVsz /tmp/tmp.M9TXqcrq1R + return 1 + true + yq w -d4 /tmp/tmp.AQPkEqWGwq/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.AQPkEqWGwq/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TalK9eb4Bg ++ mktemp + local LAST_ERR=/tmp/tmp.EGRJrBhLE0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl apply -f /tmp/tmp.AQPkEqWGwq/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TalK9eb4Bg serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.EGRJrBhLE0 + rm /tmp/tmp.TalK9eb4Bg /tmp/tmp.EGRJrBhLE0 + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=smart-update-11317 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.AQPkEqWGwq/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Dj1KaazZjy ++ mktemp + local LAST_ERR=/tmp/tmp.DKy3ZtS5Tt + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete -f /tmp/tmp.AQPkEqWGwq/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Dj1KaazZjy serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.DKy3ZtS5Tt + rm /tmp/tmp.Dj1KaazZjy /tmp/tmp.DKy3ZtS5Tt + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + xargs kubectl delete ns --grace-period=0 --force=true ++ mktemp + local LAST_OUT=/tmp/tmp.k0ZPQI1s8F ++ mktemp + local LAST_ERR=/tmp/tmp.OAGeHxPXO0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.k0ZPQI1s8F + cat /tmp/tmp.OAGeHxPXO0 + rm /tmp/tmp.k0ZPQI1s8F /tmp/tmp.OAGeHxPXO0 + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
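Note on the run of dots above: that is the wait_job_completion polling loop, which re-reads the Job's .status.succeeded once per second and gives up after 600 attempts. Stripped of the temp-file plumbing, the loop amounts to the following sketch (the failure branch is an assumption; the trace only shows the success path):

# Poll job/pgo-deploy until .status.succeeded reports 1, for at most 600 one-second attempts.
retry=0
until [[ "$(kubectl -n smart-update-11317 get job/pgo-deploy -o 'jsonpath={.status.succeeded}')" == "1" ]]; do
  sleep 1
  echo -n .
  (( ++retry >= 600 )) && { echo "job pgo-deploy did not complete in time"; exit 1; }
done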
namespace "smart-update-11317" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.3VCs20L9b7 ++ mktemp + local LAST_ERR=/tmp/tmp.uYvSL2R3T6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.3VCs20L9b7 + cat /tmp/tmp.uYvSL2R3T6 Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.3VCs20L9b7 /tmp/tmp.uYvSL2R3T6 + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.qkSGiSw02d ++ mktemp + local LAST_ERR=/tmp/tmp.ESTby07i6D + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-upgrade + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.qkSGiSw02d + cat /tmp/tmp.ESTby07i6D Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.qkSGiSw02d /tmp/tmp.ESTby07i6D + return 1 + true + rm -rf /tmp/tmp.AQPkEqWGwq