++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/version-service.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/version-service.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + VS_URL=http://version-service + VS_PORT=11000 + VS_ENDPOINT=http://version-service:11000 + main + create_namespace version-service-31598 + local namespace=version-service-31598 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + cleanup_rbac + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.XplxeoM36x ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.NmPFPHlJQG + local LAST_OUT=/tmp/tmp.b9Uq4cq6Tb + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + local LAST_ERR=/tmp/tmp.FnoYdSkGAX + local exit_status=0 + local timeout=4 + set +o errexit + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XplxeoM36x + cat /tmp/tmp.NmPFPHlJQG E0423 13:33:28.616813 24617 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:28.738139 24617 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:28.852828 24617 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:28.964373 24617 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request + rm /tmp/tmp.XplxeoM36x /tmp/tmp.NmPFPHlJQG + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level E0423 13:33:30.250956 24757 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:30.487310 24757 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:30.596428 24757 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the 
server is currently unable to handle the request E0423 13:33:30.713384 24757 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.b9Uq4cq6Tb + cat /tmp/tmp.FnoYdSkGAX E0423 13:33:36.439678 25109 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:36.666513 25109 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:36.773991 25109 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:36.882250 25109 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.b9Uq4cq6Tb /tmp/tmp.FnoYdSkGAX + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.b0521IRBQp ++ mktemp + local LAST_ERR=/tmp/tmp.txcuDaUXoD + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.b0521IRBQp + cat /tmp/tmp.txcuDaUXoD E0423 13:33:54.145582 25891 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:54.362981 25891 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:54.471737 25891 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:33:54.580007 25891 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request Error from server (NotFound): 
clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.b0521IRBQp /tmp/tmp.txcuDaUXoD + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth E0423 13:34:04.063079 26179 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:34:04.380073 26179 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:34:04.487037 26179 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0423 13:34:04.595595 26179 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.pWX9eSAkrz ++ mktemp + local LAST_ERR=/tmp/tmp.znl3ZsVZ2x + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete namespace version-service-31598 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete namespace version-service-31598 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete namespace version-service-31598 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.pWX9eSAkrz + cat /tmp/tmp.znl3ZsVZ2x Error from server (NotFound): namespaces "version-service-31598" not found + rm /tmp/tmp.pWX9eSAkrz /tmp/tmp.znl3ZsVZ2x + return 1 + : + wait_for_delete namespace/version-service-31598 + local res=namespace/version-service-31598 + set +o xtrace namespace/version-service-31598 - Error from server (NotFound): namespaces "version-service-31598" not found + kubectl_bin create namespace version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.lx4s7pktPT ++ mktemp + local LAST_ERR=/tmp/tmp.B8mZjFuI6y + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl create namespace version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lx4s7pktPT namespace/version-service-31598 created + cat /tmp/tmp.B8mZjFuI6y + rm /tmp/tmp.lx4s7pktPT /tmp/tmp.B8mZjFuI6y + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.aHdu6ck0gw +++ mktemp ++ local LAST_ERR=/tmp/tmp.kikjxjB15c ++ local exit_status=0 ++ local timeout=4 ++ 
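The cleanup above, like nearly every kubectl call in this log, runs through the suite's kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the command is retried up to three times under the per-job KUBECONFIG with pauses of 0, 4 and 8 seconds, and both captures are printed and removed afterwards. A minimal sketch of that behaviour, reconstructed from the trace rather than copied from the test library (the backoff formula, the stderr redirection and the TEST_KUBECONFIG variable are assumptions):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        set +o errexit
        for i in $(seq 0 2); do
            # TEST_KUBECONFIG stands in for the per-job file, e.g. /tmp/jen-pg-721-a67b3475b-2-version-service
            KUBECONFIG=${TEST_KUBECONFIG} kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            [[ ${exit_status} == 0 ]] && break
            sleep $((timeout * i))    # observed pauses in the trace: 0s, 4s, 8s
        done
        set -o errexit
        cat "${LAST_OUT}"
        cat "${LAST_ERR}" >&2
        rm -f "${LAST_OUT}" "${LAST_ERR}"
        return ${exit_status}
    }

Best-effort cleanup callers follow the wrapper with '|| true' (visible as '+ return 1 + true' above), which is why the NotFound errors do not abort the run.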
set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.aHdu6ck0gw ++ cat /tmp/tmp.kikjxjB15c ++ rm /tmp/tmp.aHdu6ck0gw /tmp/tmp.kikjxjB15c ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-version-service --namespace=version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.ijM39Q3ryL ++ mktemp + local LAST_ERR=/tmp/tmp.MufyAq3gCI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-version-service --namespace=version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ijM39Q3ryL Context "gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-version-service" modified. + cat /tmp/tmp.MufyAq3gCI + rm /tmp/tmp.ijM39Q3ryL /tmp/tmp.MufyAq3gCI + return 0 + deploy_operator '' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml disabled install version-service-31598 false http://version-service:11000 + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=version-service-31598 + local dont_send_telemetry=false + local version_service_uri_override=http://version-service:11000 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml disabled install version-service-31598 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=version-service-31598 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "version-service-31598"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + cat /dev/fd/63 - + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "version-service-31598"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' ++ echo 'crunchy_debug: "true"' ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b ++ cut -d: -f1 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-721-a67b3475b"#g' ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w -d3 - 'subjects[0].namespace' version-service-31598 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-deployer + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml metadata.namespace version-service-31598 ++ cat /tmp/tmp.ZAvsDSImr9/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" 
ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "version-service-31598" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-721-a67b3475b" pgo_operator_namespace: "version-service-31598"' + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4EYtRKbxW5 ++ mktemp + local LAST_ERR=/tmp/tmp.F18CtJtGjX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4EYtRKbxW5 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.F18CtJtGjX + rm /tmp/tmp.4EYtRKbxW5 /tmp/tmp.F18CtJtGjX + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
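The operator manifest applied above is not used verbatim: prepare_operator_yaml pulls the embedded values.yaml out of document 2 of deploy/operator.yaml with yq, patches namespace, namespace_mode, image prefix/tag and telemetry settings with sed, then writes the result back together with namespace and image overrides in the other documents. A rough equivalent of that pipeline, condensed from the trace (yq v3 syntax as used by the suite; paths shortened to repo-relative form and several sed edits omitted):

    tmp_dir=$(mktemp -d)
    operator_yaml=deploy/operator.yaml

    # extract and patch the embedded values.yaml (document index 2)
    yq r -d2 "${operator_yaml}" 'data[values.yaml]' \
        | sed -e 's#^namespace: .*#namespace: "version-service-31598"#' \
              -e 's#namespace_mode: .*#namespace_mode: "disabled"#' \
              -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#' \
              -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-721-a67b3475b"#' \
              -e 's#^disable_telemetry: .*#disable_telemetry: "false"#' \
        > "${tmp_dir}/operator.ini"

    # write the patched values back and fix namespaces/images in the other documents
    yq w -d'*' "${operator_yaml}" metadata.namespace version-service-31598 \
        | yq w -d2 - 'data[values.yaml]' "$(cat "${tmp_dir}/operator.ini")" \
        | yq w -d3 - 'subjects[0].namespace' version-service-31598 \
        | yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-deployer \
        | yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install \
        > "${tmp_dir}/operator.yaml"

    kubectl apply -f "${tmp_dir}/operator.yaml"

The polling dots that follow come from wait_job_completion watching the pgo-deploy job created by this manifest.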
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
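The dotted output running through this part of the log is wait_job_completion polling the pgo-deploy job once per second until .status.succeeded becomes 1, with a cap of 600 attempts. Stripped of the not-found handling visible in the trace, the loop reduces to roughly:

    wait_job_completion() {
        local job=$1
        local ns=${2:-version-service-31598}
        local retry=0

        until [[ $(kubectl -n "${ns}" get "job/${job}" -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
            sleep 1
            echo -n .
            retry=$((retry + 1))
            if [[ ${retry} -ge 600 ]]; then
                echo "job ${job} did not finish in time" >&2
                return 1
            fi
        done
        echo
    }

The same helper is reused further down for the minimal-stanza-create and backrest-backup-minimal jobs.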
.+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.S7Z6vhsNn7 ++ mktemp + local LAST_ERR=/tmp/tmp.XLa0lgi7bd + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.S7Z6vhsNn7 serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.XLa0lgi7bd + rm /tmp/tmp.S7Z6vhsNn7 /tmp/tmp.XLa0lgi7bd + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UlwyreVjHA +++ mktemp ++ local LAST_ERR=/tmp/tmp.U86GGEztfE ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.UlwyreVjHA ++ cat /tmp/tmp.U86GGEztfE ++ rm /tmp/tmp.UlwyreVjHA /tmp/tmp.U86GGEztfE ++ return 0 + wait_pod postgres-operator-6d9b857566-fklnb + local pod=postgres-operator-6d9b857566-fklnb + local ns= + set +o xtrace postgres-operator-6d9b857566-fklnbtrue + [[ -n http://version-service:11000 ]] + kubectl_bin -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"add","path":"/spec/template/spec/containers/1/env/-","value":{"name":"PERCONA_VS_FALLBACK_URI","value":"http://version-service:11000"}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.4agZQHNtWI ++ mktemp + local LAST_ERR=/tmp/tmp.Nl0Tx9v56r + local exit_status=0 + local timeout=4 + set +o errexit ++ 
seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"add","path":"/spec/template/spec/containers/1/env/-","value":{"name":"PERCONA_VS_FALLBACK_URI","value":"http://version-service:11000"}}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4agZQHNtWI deployment.apps/postgres-operator patched + cat /tmp/tmp.Nl0Tx9v56r + rm /tmp/tmp.4agZQHNtWI /tmp/tmp.Nl0Tx9v56r + return 0 + sleep 15 + deploy_version_service + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap versions --from-file /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/operator.9.9.9.pg-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/operator.9.9.9.pg-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.cdAKX2YltH ++ mktemp + local LAST_ERR=/tmp/tmp.iOnpa6wL4l + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl create configmap versions --from-file /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/operator.9.9.9.pg-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/operator.9.9.9.pg-operator.json + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.cdAKX2YltH configmap/versions created + cat /tmp/tmp.iOnpa6wL4l + rm /tmp/tmp.cdAKX2YltH /tmp/tmp.iOnpa6wL4l + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.UEnamLHY7k ++ mktemp + local LAST_ERR=/tmp/tmp.plpMyrsxhT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/vs.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.UEnamLHY7k deployment.apps/version-service created service/version-service created + cat /tmp/tmp.plpMyrsxhT + rm /tmp/tmp.UEnamLHY7k /tmp/tmp.plpMyrsxhT + return 0 + /usr/bin/sed s/version-service/version-service-cr/g /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/vs.yml + kubectl_bin apply -f - ++ yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/vs.yml 'spec.template.spec.containers[0].image' ++ mktemp + local LAST_OUT=/tmp/tmp.FKY6Z7fgxO ++ mktemp + local LAST_ERR=/tmp/tmp.1E2nPFDB0f + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + yq w - -d0 'spec.template.spec.containers[0].image' perconalab/version-service:main-ea25e30 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.FKY6Z7fgxO deployment.apps/version-service-cr created service/version-service-cr created + cat /tmp/tmp.1E2nPFDB0f + rm /tmp/tmp.FKY6Z7fgxO /tmp/tmp.1E2nPFDB0f + return 0 + sleep 10 + check_telemetry_transfer http://version-service-cr:11000 disabled enabled + local cr_vs_uri=http://version-service-cr:11000 + 
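Two version-service instances come into play at this point: the operator deployment is patched so its fallback URI points at the in-cluster test service, and a second copy named version-service-cr is created by sed-renaming the same manifest so the per-CR endpoint can be exercised separately. The essential commands, condensed from the trace (paths shortened to repo-relative form, image override omitted):

    # point the operator at the in-cluster test version service
    kubectl -n version-service-31598 patch deployment/postgres-operator --type json \
        -p '[{"op":"add","path":"/spec/template/spec/containers/1/env/-","value":{"name":"PERCONA_VS_FALLBACK_URI","value":"http://version-service:11000"}}]'

    # canned version data served by the test service
    kubectl create configmap versions \
        --from-file e2e-tests/version-service/conf/operator.9.9.9.pg-operator.dep.json \
        --from-file e2e-tests/version-service/conf/operator.9.9.9.pg-operator.json
    kubectl apply -f e2e-tests/version-service/conf/vs.yml

    # second copy, reachable as version-service-cr, for the CR-level endpoint check
    sed s/version-service/version-service-cr/g e2e-tests/version-service/conf/vs.yml | kubectl apply -f -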
local cr_vs_channel=disabled + local telemetry_state=enabled + local cluster=minimal + yq r /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr-minimal.yaml + yq w - spec.upgradeOptions.versionServiceEndpoint http://version-service-cr:11000 + yq w - spec.upgradeOptions.apply disabled + spinup_pgcluster minimal /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local cluster=minimal + local config=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets minimal version-service-31598 + local cluster=minimal + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - + yq w - metadata.name minimal-users-secret ++ mktemp + local LAST_OUT=/tmp/tmp.pfCNC2y3AX ++ mktemp + local LAST_ERR=/tmp/tmp.nyflFDPao4 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.pfCNC2y3AX secret/minimal-users-secret created + cat /tmp/tmp.nyflFDPao4 + rm /tmp/tmp.pfCNC2y3AX /tmp/tmp.nyflFDPao4 + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name minimal-pmm-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XV1Dp9mpsE ++ mktemp + local LAST_ERR=/tmp/tmp.pe7L266CNK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XV1Dp9mpsE secret/minimal-pmm-secret created + cat /tmp/tmp.pe7L266CNK + rm /tmp/tmp.XV1Dp9mpsE /tmp/tmp.pe7L266CNK + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ogkd5Tj1FZ ++ mktemp + local LAST_ERR=/tmp/tmp.hE062468p4 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ogkd5Tj1FZ deployment.apps/pg-client created + cat /tmp/tmp.hE062468p4 + rm /tmp/tmp.ogkd5Tj1FZ /tmp/tmp.hE062468p4 + return 0 + apply_cluster /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml minimal false false false false false false false false + local path=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local name=minimal + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + 
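check_telemetry_transfer builds its test cluster from deploy/cr-minimal.yaml, pointing spec.upgradeOptions at the second version service and disabling automatic upgrades. The CR preparation, condensed from the trace (yq v3 syntax; the output path is illustrative):

    yq r deploy/cr-minimal.yaml \
        | yq w - spec.upgradeOptions.versionServiceEndpoint http://version-service-cr:11000 \
        | yq w - spec.upgradeOptions.apply disabled \
        > /tmp/cr.minimal.apply.disabled.yaml

spinup_pgcluster creates the users and pmm secrets and the pg-client deployment (shown above), then applies the fully rendered CR, which appears just below as perconapgcluster.pg.percona.com/minimal created.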
local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml metadata.annotations.current-primary minimal + yq w - metadata.labels.crunchy-pgha-scope minimal + yq w - spec.name minimal + yq w - metadata.labels.pg-cluster minimal + yq w - spec.database minimal + yq w - metadata.labels.name minimal + yq w - spec.user some-name + yq w - metadata.labels.deployment-name minimal + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.keepBackups false + yq w - spec.secretsName minimal-users-secret + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - metadata.name minimal + yq w - spec.clustername minimal + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.keepData false + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.Oh90qlwxh4 ++ mktemp + local LAST_ERR=/tmp/tmp.tzlELh81Aa + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Oh90qlwxh4 perconapgcluster.pg.percona.com/minimal created + cat /tmp/tmp.tzlELh81Aa + rm /tmp/tmp.Oh90qlwxh4 /tmp/tmp.tzlELh81Aa + return 0 + wait_deployment minimal-backrest-shared-repo + local name=minimal-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal-backrest-shared-repo... + wait_deployment minimal + local name=minimal + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal............ + [[ false == \g\c\s\+* ]] + wait_job_completion minimal-stanza-create false + local job=minimal-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' minimal-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-minimal + local job=backrest-backup-minimal + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' backrest-backup-minimal+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
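Once the backup job being polled here completes, wait_cluster_consistency takes over: it reads the perconapgcluster resource and waits until the status reports 'pgcluster Initialized' (the exact jsonpath depends on the pgo-version label, as the case statement further below shows). A minimal sketch of that status poll (field path taken from the trace; the sleep interval and retry cap are assumptions):

    wait_cluster_status() {
        local cluster=$1
        local target="${2:-pgcluster Initialized}"
        local ns=${3:-version-service-31598}
        local retry=0

        until [[ $(kubectl -n "${ns}" get "perconapgcluster/${cluster}" -o 'jsonpath={.status.PGCluster.state}') == "${target}" ]]; do
            sleep 10
            retry=$((retry + 1))
            if [[ ${retry} -ge 60 ]]; then
                echo "cluster ${cluster} never reached '${target}'" >&2
                return 1
            fi
        done
    }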
.+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
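The write-data phase a little further below drives psql through the suite's run_psql helper: it looks up the pg-client pod and pipes the SQL into psql inside that pod over a postgres:// URI whose password comes from the cluster's users secret. A minimal sketch of the helper as it appears in the trace (quoting simplified; it assumes the SQL contains no single quotes):

    run_psql() {
        local command=$1
        local uri=$2                      # e.g. postgres:postgres_password@minimal.version-service-31598
        local driver=${3:-postgres}
        local suffix=${4:-.svc.cluster.local}

        local client_container
        client_container=$(kubectl -n version-service-31598 get pods \
            --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')

        kubectl exec "${client_container}" -n version-service-31598 -- \
            bash -c "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q ${driver}://${uri}${suffix}"
    }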
.+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency minimal + cluster_name=minimal + wait_cluster_status minimal 'pgcluster Initialized' + local cluster_name=minimal + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6EA9jZC93s +++ mktemp ++ local LAST_ERR=/tmp/tmp.F6WcYdXUpG ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6EA9jZC93s ++ cat /tmp/tmp.F6WcYdXUpG ++ rm /tmp/tmp.6EA9jZC93s /tmp/tmp.F6WcYdXUpG ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9BUM8wlgiw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R660NyW6Lc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9BUM8wlgiw +++ cat /tmp/tmp.R660NyW6Lc +++ rm /tmp/tmp.9BUM8wlgiw /tmp/tmp.R660NyW6Lc +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2QC2RmMwPp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cG2gy6WcfO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.2QC2RmMwPp +++ cat /tmp/tmp.cG2gy6WcfO +++ rm /tmp/tmp.2QC2RmMwPp /tmp/tmp.cG2gy6WcfO +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.qf22sVGOeS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0EiST5fCuY +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qf22sVGOeS +++ cat /tmp/tmp.0EiST5fCuY +++ rm /tmp/tmp.qf22sVGOeS /tmp/tmp.0EiST5fCuY +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LhswCvkJyV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UnakCrEFVO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.LhswCvkJyV +++ cat /tmp/tmp.UnakCrEFVO +++ rm /tmp/tmp.LhswCvkJyV /tmp/tmp.UnakCrEFVO +++ return 0 ++ [[ xminimal-users-secret == \x ]] ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp +++ base64 -d ++++ local LAST_OUT=/tmp/tmp.zO22GMkuJD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hUUoHCiOml ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.zO22GMkuJD ++++ cat /tmp/tmp.hUUoHCiOml ++++ rm /tmp/tmp.zO22GMkuJD /tmp/tmp.hUUoHCiOml ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8VXROOVDAd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DG2tdaJKlD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8VXROOVDAd +++ cat /tmp/tmp.DG2tdaJKlD +++ rm /tmp/tmp.8VXROOVDAd /tmp/tmp.DG2tdaJKlD +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@minimal.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k8nC33aq8w +++ mktemp ++ local LAST_ERR=/tmp/tmp.61ZrC2MLQ1 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.k8nC33aq8w ++ cat /tmp/tmp.61ZrC2MLQ1 ++ rm 
/tmp/tmp.k8nC33aq8w /tmp/tmp.61ZrC2MLQ1 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.2if7FRgGNf ++ mktemp + local LAST_ERR=/tmp/tmp.WDswYUVyEw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.2if7FRgGNf + cat /tmp/tmp.WDswYUVyEw + rm /tmp/tmp.2if7FRgGNf /tmp/tmp.WDswYUVyEw + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZorzOGD53l ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l4lbj0Pbte +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ZorzOGD53l +++ cat /tmp/tmp.l4lbj0Pbte +++ rm /tmp/tmp.ZorzOGD53l /tmp/tmp.l4lbj0Pbte +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wyK4DxxqDR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cqru7EakIq +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wyK4DxxqDR +++ cat /tmp/tmp.cqru7EakIq +++ rm /tmp/tmp.wyK4DxxqDR /tmp/tmp.cqru7EakIq +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zcC2ItJ8hv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iia1C1HcTb +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.zcC2ItJ8hv +++ cat /tmp/tmp.iia1C1HcTb +++ rm /tmp/tmp.zcC2ItJ8hv /tmp/tmp.iia1C1HcTb +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.UxD1VqdD2e +++++ mktemp ++++ local 
LAST_ERR=/tmp/tmp.jKY0JVMBYu ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.UxD1VqdD2e ++++ cat /tmp/tmp.jKY0JVMBYu ++++ rm /tmp/tmp.UxD1VqdD2e /tmp/tmp.jKY0JVMBYu ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0aSjbg9xzO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.M1M94qOy9L +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0aSjbg9xzO +++ cat /tmp/tmp.M1M94qOy9L +++ rm /tmp/tmp.0aSjbg9xzO /tmp/tmp.M1M94qOy9L +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IOnqEbZGnV +++ mktemp ++ local LAST_ERR=/tmp/tmp.fTHQOYZEIi ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.IOnqEbZGnV ++ cat /tmp/tmp.fTHQOYZEIi ++ rm /tmp/tmp.IOnqEbZGnV /tmp/tmp.fTHQOYZEIi ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.UPM51xtY6a ++ mktemp + local LAST_ERR=/tmp/tmp.iFe56wj6Zv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.UPM51xtY6a + cat /tmp/tmp.iFe56wj6Zv + rm /tmp/tmp.UPM51xtY6a /tmp/tmp.iFe56wj6Zv + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4ojme2Jy52 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PqKvadUMKB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 
2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.4ojme2Jy52 +++ cat /tmp/tmp.PqKvadUMKB +++ rm /tmp/tmp.4ojme2Jy52 /tmp/tmp.PqKvadUMKB +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.f9U0vah7LJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yCTJCtEJLR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.f9U0vah7LJ +++ cat /tmp/tmp.yCTJCtEJLR +++ rm /tmp/tmp.f9U0vah7LJ /tmp/tmp.yCTJCtEJLR +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.epRlKhtMT4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3G3JYYSv7C +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.epRlKhtMT4 +++ cat /tmp/tmp.3G3JYYSv7C +++ rm /tmp/tmp.epRlKhtMT4 /tmp/tmp.3G3JYYSv7C +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6JUns68QA0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.AjTFdG1Rn4 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.6JUns68QA0 ++++ cat /tmp/tmp.AjTFdG1Rn4 ++++ rm /tmp/tmp.6JUns68QA0 /tmp/tmp.AjTFdG1Rn4 ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IgR9NjUFoe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S23mzcOM0Q +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IgR9NjUFoe +++ cat /tmp/tmp.S23mzcOM0Q +++ rm /tmp/tmp.IgR9NjUFoe /tmp/tmp.S23mzcOM0Q +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ 
kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HK92jv52j2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OYxQaVqOBo ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.HK92jv52j2 ++ cat /tmp/tmp.OYxQaVqOBo ++ rm /tmp/tmp.HK92jv52j2 /tmp/tmp.OYxQaVqOBo ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.6ZTPll1T9d ++ mktemp + local LAST_ERR=/tmp/tmp.x2tasbAnBO + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.6ZTPll1T9d + cat /tmp/tmp.x2tasbAnBO + rm /tmp/tmp.6ZTPll1T9d /tmp/tmp.x2tasbAnBO + return 0 + sleep 10 + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + grep -E 'server request payload|unary call' + kubectl_bin logs version-service-cr-76cc4777bf-zspc4 ++ mktemp + local LAST_OUT=/tmp/tmp.zobZxq5AK4 ++ mktemp + local LAST_ERR=/tmp/tmp.rHDIjjzbbu + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-cr-76cc4777bf-zspc4 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zobZxq5AK4 + cat /tmp/tmp.rHDIjjzbbu + rm /tmp/tmp.zobZxq5AK4 /tmp/tmp.rHDIjjzbbu + return 0 + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.start_time")' + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + kubectl_bin logs version-service-5cd4db57-78wsq ++ mktemp + local LAST_OUT=/tmp/tmp.drPbhwVZOU ++ mktemp + local LAST_ERR=/tmp/tmp.mBizoEx9PY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-5cd4db57-78wsq + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.drPbhwVZOU + cat /tmp/tmp.mBizoEx9PY + rm /tmp/tmp.drPbhwVZOU /tmp/tmp.mBizoEx9PY + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/enabled_telemetry.version-service.log.json + [[ enabled == \d\i\s\a\b\l\e\d ]] 
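
For reference, the log-comparison step traced above reduces to a small pipeline: grab the version-service pod's logs, keep only the gRPC request/unary-call entries, strip the fields that differ between runs, and diff against the expected JSON kept in the repo. The sketch below is inferred from the commands in the trace, not the test's actual helper; the function name check_vs_log is invented here, while the selectors, jq filters and file layout are the ones shown above.

# Sketch: normalize a version-service pod's gRPC log lines and diff them
# against the expected *.log.json file (assumed helper name check_vs_log).
check_vs_log() {
    local selector=$1      # e.g. run=version-service or run=version-service-cr
    local expected=$2      # path to the expected *.log.json file
    local pod
    pod=$(kubectl get pods --selector="${selector}" \
        -o 'jsonpath={.items[0].metadata.name}')

    # Keep only request/unary-call entries, pull out the JSON payload and
    # drop the run-specific fields before comparing.
    kubectl logs "${pod}" \
        | grep -E 'server request payload|unary call' \
        | grep -Eo '\{.*\}' \
        | jq 'del(."grpc.request.content".msg.customResourceUid)' \
        | jq 'del(."grpc.start_time")' \
        | jq 'del(."grpc.time_ms")' \
        > "/tmp/actual.$(basename "${expected}")"

    diff -u "/tmp/actual.$(basename "${expected}")" "${expected}"
}
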
+ diff -u /tmp/tmp.ZAvsDSImr9/enabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/enabled_telemetry.version-service-cr.log.json + kubectl_bin delete perconapgcluster/minimal ++ mktemp + local LAST_OUT=/tmp/tmp.wBgEv1DtiO ++ mktemp + local LAST_ERR=/tmp/tmp.bxCvn57xEj + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete perconapgcluster/minimal + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wBgEv1DtiO perconapgcluster.pg.percona.com "minimal" deleted + cat /tmp/tmp.bxCvn57xEj + rm /tmp/tmp.wBgEv1DtiO /tmp/tmp.bxCvn57xEj + return 0 ++ kubectl_bin -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kYzGDy2X4C +++ mktemp ++ local LAST_ERR=/tmp/tmp.yI3Mq6XW6N ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.kYzGDy2X4C ++ cat /tmp/tmp.yI3Mq6XW6N ++ rm /tmp/tmp.kYzGDy2X4C /tmp/tmp.yI3Mq6XW6N ++ return 0 + wait_job_completion minimal-rmdata-mubn + local job=minimal-rmdata-mubn + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' minimal-rmdata-mubn+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
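
The dotted output here comes from a polling loop. A minimal sketch of what the trace implies wait_job_completion does, with the one-second sleep and 600-iteration cap taken from the log and the error handling simplified:

# Sketch of the retry loop seen in the trace: wait until the job either
# disappears or reports a succeeded pod.
wait_job_completion() {
    local job=$1
    local ns=${2:-version-service-31598}
    local retry=0

    while true; do
        # Stop waiting if the job object is gone (e.g. already cleaned up).
        if kubectl -n "${ns}" get "job/${job}" \
            -o 'jsonpath={.metadata.name}' 2>&1 | grep -qio 'not found'; then
            echo
            return 0
        fi
        # Done once the job reports at least one succeeded pod.
        if [[ $(kubectl -n "${ns}" get "job/${job}" \
            -o 'jsonpath={.status.succeeded}') == 1 ]]; then
            echo
            return 0
        fi
        sleep 1
        echo -n .
        retry=$((retry + 1))
        [[ ${retry} -ge 600 ]] && return 1
    done
}
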
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-mubn -o 'jsonpath={.metadata.name}' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + kubectl_bin -n version-service-31598 delete pod --selector=run=version-service ++ mktemp + local LAST_OUT=/tmp/tmp.hqDgIWnoty ++ mktemp + local LAST_ERR=/tmp/tmp.ZVhVg6Qi1j + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 delete pod --selector=run=version-service + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hqDgIWnoty pod "version-service-5cd4db57-78wsq" deleted + cat /tmp/tmp.ZVhVg6Qi1j + rm /tmp/tmp.hqDgIWnoty /tmp/tmp.ZVhVg6Qi1j + return 0 + wait_deployment version-service + local name=version-service + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace version-service + kubectl_bin -n version-service-31598 delete pod --selector=run=version-service-cr ++ mktemp + local LAST_OUT=/tmp/tmp.qNEHu4rRqz ++ mktemp + local LAST_ERR=/tmp/tmp.T4Pjd9tqro + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 delete pod --selector=run=version-service-cr + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qNEHu4rRqz pod "version-service-cr-76cc4777bf-zspc4" deleted + cat /tmp/tmp.T4Pjd9tqro + rm /tmp/tmp.qNEHu4rRqz /tmp/tmp.T4Pjd9tqro + return 0 + wait_deployment version-service-cr + local name=version-service-cr + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace version-service-cr + kubectl_bin -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"replace","path":"/spec/template/spec/containers/1/env/7","value":{"name":"DISABLE_TELEMETRY","value":"true"}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.lJimJ30xhp ++ mktemp + local LAST_ERR=/tmp/tmp.ymhjaCK74b + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"replace","path":"/spec/template/spec/containers/1/env/7","value":{"name":"DISABLE_TELEMETRY","value":"true"}}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lJimJ30xhp deployment.apps/postgres-operator patched + cat 
/tmp/tmp.ymhjaCK74b + rm /tmp/tmp.lJimJ30xhp /tmp/tmp.ymhjaCK74b + return 0 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace postgres-operator... + check_telemetry_transfer http://version-service-cr:11000 disabled disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=disabled + local cluster=minimal + yq w - spec.upgradeOptions.versionServiceEndpoint http://version-service-cr:11000 + yq r /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr-minimal.yaml + yq w - spec.upgradeOptions.apply disabled + spinup_pgcluster minimal /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local cluster=minimal + local config=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets minimal version-service-31598 + local cluster=minimal + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name minimal-users-secret + kubectl_bin -n version-service-31598 apply -f - + yq w - stringData.some-name some-name_pass ++ mktemp + local LAST_OUT=/tmp/tmp.6Dx6zLkfMc ++ mktemp + local LAST_ERR=/tmp/tmp.lHXB0KcJts + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.6Dx6zLkfMc secret/minimal-users-secret created + cat /tmp/tmp.lHXB0KcJts + rm /tmp/tmp.6Dx6zLkfMc /tmp/tmp.lHXB0KcJts + return 0 + yq w - metadata.name minimal-pmm-secret + kubectl_bin -n version-service-31598 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.q0oFQwUSQl ++ mktemp + local LAST_ERR=/tmp/tmp.AVFLXuotzB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.q0oFQwUSQl secret/minimal-pmm-secret configured + cat /tmp/tmp.AVFLXuotzB + rm /tmp/tmp.q0oFQwUSQl /tmp/tmp.AVFLXuotzB + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.YJel2KfKlg ++ mktemp + local LAST_ERR=/tmp/tmp.eJ3tIkvwIZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YJel2KfKlg deployment.apps/pg-client unchanged + cat /tmp/tmp.eJ3tIkvwIZ + rm /tmp/tmp.YJel2KfKlg /tmp/tmp.eJ3tIkvwIZ + 
return 0 + apply_cluster /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml minimal false false false false false false false false + local path=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml + local name=minimal + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.crunchy-pgha-scope minimal + yq w - metadata.labels.deployment-name minimal + yq w /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.disabled.yaml metadata.annotations.current-primary minimal + yq w - metadata.labels.name minimal + yq w - spec.user some-name + yq w - metadata.labels.pg-cluster minimal + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - metadata.name minimal + yq w - spec.secretsName minimal-users-secret + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.name minimal + yq w - spec.keepBackups false + yq w - spec.database minimal + yq w - spec.clustername minimal + yq w - spec.keepData false + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.e7sP05N0oR ++ mktemp + local LAST_ERR=/tmp/tmp.Gc6C4JyLhe + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.e7sP05N0oR perconapgcluster.pg.percona.com/minimal created + cat /tmp/tmp.Gc6C4JyLhe + rm /tmp/tmp.e7sP05N0oR /tmp/tmp.Gc6C4JyLhe + return 0 + wait_deployment minimal-backrest-shared-repo + local name=minimal-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal-backrest-shared-repo.... + wait_deployment minimal + local name=minimal + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal........... + [[ false == \g\c\s\+* ]] + wait_job_completion minimal-stanza-create false + local job=minimal-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' minimal-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
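
The long yq pipeline in the apply_cluster step above only stamps the minimal CR with test-specific names, labels and image tags before applying it. A condensed sketch using the same yq v3 read/write syntax that appears in the trace; the function name render_cr is invented, the paths and image values are the ones shown above, and several of the fields written in the real pipeline are omitted for brevity.

# Sketch: customize the minimal CR and apply it (subset of the fields
# written in the trace above).
render_cr() {
    local src=$1     # e.g. the cr.minimal.apply.<channel>.yaml produced earlier
    local name=$2    # e.g. minimal
    local out=$3     # e.g. /tmp/.../cr.yaml

    yq w "${src}" metadata.annotations.current-primary "${name}" \
        | yq w - metadata.name "${name}" \
        | yq w - metadata.labels.pg-cluster "${name}" \
        | yq w - metadata.labels.deployment-name "${name}" \
        | yq w - spec.clustername "${name}" \
        | yq w - spec.database "${name}" \
        | yq w - spec.user some-name \
        | yq w - spec.secretsName "${name}-users-secret" \
        | yq w - spec.pgPrimary.image \
            perconalab/percona-postgresql-operator:main-ppg14-postgres-ha \
        > "${out}"

    kubectl apply -f "${out}" -n version-service-31598
}
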
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
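
Once the cluster reports Initialized, the "write data" step that follows below goes through a run_psql helper. A minimal sketch of what the trace implies it does: read the superuser password from the cluster's users secret and pipe the SQL into psql inside the pg-client pod. The pod and secret names are the ones from this run, the quoting is simplified, and the pgo-version branching visible in the trace is skipped.

# Sketch: fetch the postgres password the way the trace does (secret name
# comes from spec.secretsName, value is base64-encoded).
get_psql_user_pass() {
    local user=$1 cluster=$2 ns=$3
    local secret
    secret=$(kubectl -n "${ns}" get "perconapgcluster/${cluster}" \
        -o 'jsonpath={.spec.secretsName}')
    kubectl -n "${ns}" get "secret/${secret}" \
        -o "jsonpath={.data.${user}}" | base64 -d
}

# Sketch: run a psql command from inside the pg-client pod.
run_psql() {
    local command=$1 uri=$2 ns=$3
    local client
    client=$(kubectl -n "${ns}" get pods --selector=name=pg-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "${client}" -n "${ns}" -- bash -c \
        "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q postgres://${uri}.svc.cluster.local"
}

# Usage, mirroring the trace:
# pass=$(get_psql_user_pass postgres minimal version-service-31598)
# run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' \
#     "postgres:${pass}@minimal.version-service-31598" version-service-31598
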
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-minimal + local job=backrest-backup-minimal + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' backrest-backup-minimal+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency minimal + cluster_name=minimal + wait_cluster_status minimal 'pgcluster Initialized' + local cluster_name=minimal + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mpQXkRE67h +++ mktemp ++ local LAST_ERR=/tmp/tmp.BnfArYhxP6 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.mpQXkRE67h ++ cat /tmp/tmp.BnfArYhxP6 ++ rm /tmp/tmp.mpQXkRE67h /tmp/tmp.BnfArYhxP6 ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CazyVa8IYc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.J15RIY5BZI +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CazyVa8IYc +++ cat /tmp/tmp.J15RIY5BZI +++ rm /tmp/tmp.CazyVa8IYc /tmp/tmp.J15RIY5BZI +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H80YjixfR1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cDDeTPMnLc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 
get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.H80YjixfR1 +++ cat /tmp/tmp.cDDeTPMnLc +++ rm /tmp/tmp.H80YjixfR1 /tmp/tmp.cDDeTPMnLc +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZhMya7PK2s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YHjeDY9Xy1 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ZhMya7PK2s +++ cat /tmp/tmp.YHjeDY9Xy1 +++ rm /tmp/tmp.ZhMya7PK2s /tmp/tmp.YHjeDY9Xy1 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fx4XoLKyEY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Sm0BM88t49 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fx4XoLKyEY +++ cat /tmp/tmp.Sm0BM88t49 +++ rm /tmp/tmp.fx4XoLKyEY /tmp/tmp.Sm0BM88t49 +++ return 0 ++ [[ xminimal-users-secret == \x ]] ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp +++ base64 -d ++++ local LAST_OUT=/tmp/tmp.slYW0FBg9N +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jRK14mIAbR ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.slYW0FBg9N ++++ cat /tmp/tmp.jRK14mIAbR ++++ rm /tmp/tmp.slYW0FBg9N /tmp/tmp.jRK14mIAbR ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NnaZ6czO1J ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CLUmTi4X1k +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.NnaZ6czO1J +++ cat /tmp/tmp.CLUmTi4X1k +++ rm /tmp/tmp.NnaZ6czO1J /tmp/tmp.CLUmTi4X1k +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@minimal.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.8S9Xmio1CX +++ mktemp ++ local LAST_ERR=/tmp/tmp.aAD5YPoaXt ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.8S9Xmio1CX ++ cat /tmp/tmp.aAD5YPoaXt ++ rm /tmp/tmp.8S9Xmio1CX /tmp/tmp.aAD5YPoaXt ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.0friFADaHV ++ mktemp + local LAST_ERR=/tmp/tmp.uBfjw0AXLf + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0friFADaHV + cat /tmp/tmp.uBfjw0AXLf + rm /tmp/tmp.0friFADaHV /tmp/tmp.uBfjw0AXLf + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YFZLWvSNV0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i3T4VOFvSH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.YFZLWvSNV0 +++ cat /tmp/tmp.i3T4VOFvSH +++ rm /tmp/tmp.YFZLWvSNV0 /tmp/tmp.i3T4VOFvSH +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Cuh5BXAXDe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AVe0PyVzlR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Cuh5BXAXDe +++ cat /tmp/tmp.AVe0PyVzlR +++ rm /tmp/tmp.Cuh5BXAXDe /tmp/tmp.AVe0PyVzlR +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Oz2H05Tt2q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YmX3ebm3LL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 
'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Oz2H05Tt2q +++ cat /tmp/tmp.YmX3ebm3LL +++ rm /tmp/tmp.Oz2H05Tt2q /tmp/tmp.YmX3ebm3LL +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.y72Tj5kdL0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ZiEpX31L8g ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.y72Tj5kdL0 ++++ cat /tmp/tmp.ZiEpX31L8g ++++ rm /tmp/tmp.y72Tj5kdL0 /tmp/tmp.ZiEpX31L8g ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6lbUvrUJR5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PVGvzSCDV0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6lbUvrUJR5 +++ cat /tmp/tmp.PVGvzSCDV0 +++ rm /tmp/tmp.6lbUvrUJR5 /tmp/tmp.PVGvzSCDV0 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8KrUt3ChTn +++ mktemp ++ local LAST_ERR=/tmp/tmp.M1CaLTaZGk ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.8KrUt3ChTn ++ cat /tmp/tmp.M1CaLTaZGk ++ rm /tmp/tmp.8KrUt3ChTn /tmp/tmp.M1CaLTaZGk ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Cuey8U489L ++ mktemp + local LAST_ERR=/tmp/tmp.O3xDK99y1H + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Cuey8U489L + cat /tmp/tmp.O3xDK99y1H + rm /tmp/tmp.Cuey8U489L 
/tmp/tmp.O3xDK99y1H + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TLRaDENLPg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GF4VqSlUfU +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TLRaDENLPg +++ cat /tmp/tmp.GF4VqSlUfU +++ rm /tmp/tmp.TLRaDENLPg /tmp/tmp.GF4VqSlUfU +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TgxlmEOQND ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F136ysrym1 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TgxlmEOQND +++ cat /tmp/tmp.F136ysrym1 +++ rm /tmp/tmp.TgxlmEOQND /tmp/tmp.F136ysrym1 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6M6Y7XcRxv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3ERiJQEFxm +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6M6Y7XcRxv +++ cat /tmp/tmp.3ERiJQEFxm +++ rm /tmp/tmp.6M6Y7XcRxv /tmp/tmp.3ERiJQEFxm +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.UuDoCqvp5c +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.flG86KbjqM ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.UuDoCqvp5c ++++ cat /tmp/tmp.flG86KbjqM ++++ rm /tmp/tmp.UuDoCqvp5c /tmp/tmp.flG86KbjqM ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UGnj12JsyC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PRMxAE8ZuG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UGnj12JsyC +++ cat /tmp/tmp.PRMxAE8ZuG +++ rm /tmp/tmp.UGnj12JsyC /tmp/tmp.PRMxAE8ZuG +++ 
return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m9BAZHVjlr +++ mktemp ++ local LAST_ERR=/tmp/tmp.S7xDslGxQ4 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.m9BAZHVjlr ++ cat /tmp/tmp.S7xDslGxQ4 ++ rm /tmp/tmp.m9BAZHVjlr /tmp/tmp.S7xDslGxQ4 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.R1sKZUl4tS ++ mktemp + local LAST_ERR=/tmp/tmp.cYZcPJsOSS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.R1sKZUl4tS + cat /tmp/tmp.cYZcPJsOSS + rm /tmp/tmp.R1sKZUl4tS /tmp/tmp.cYZcPJsOSS + return 0 + sleep 10 + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.time_ms")' + grep -E 'server request payload|unary call' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + kubectl_bin logs version-service-cr-76cc4777bf-v5rk5 ++ mktemp + local LAST_OUT=/tmp/tmp.lsCmlY1YKN ++ mktemp + local LAST_ERR=/tmp/tmp.6cqGJa9ezb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-cr-76cc4777bf-v5rk5 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lsCmlY1YKN + cat /tmp/tmp.6cqGJa9ezb + rm /tmp/tmp.lsCmlY1YKN /tmp/tmp.6cqGJa9ezb + return 0 + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + grep -E 'server request payload|unary call' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.start_time")' + kubectl_bin logs version-service-5cd4db57-x6px5 ++ mktemp + local LAST_OUT=/tmp/tmp.XYpRhUR0Rg ++ mktemp + local LAST_ERR=/tmp/tmp.TPRHdIPhot + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-5cd4db57-x6px5 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XYpRhUR0Rg + cat /tmp/tmp.TPRHdIPhot + rm /tmp/tmp.XYpRhUR0Rg /tmp/tmp.TPRHdIPhot + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/disabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/disabled_telemetry.version-service.log.json + [[ disabled == \d\i\s\a\b\l\e\d ]] + [[ disabled == \r\e\c\o\m\m\e\n\d\e\d ]] + diff -u /tmp/tmp.ZAvsDSImr9/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log.json + kubectl_bin delete perconapgcluster/minimal ++ mktemp + local LAST_OUT=/tmp/tmp.17PEMvuAUr ++ mktemp + local LAST_ERR=/tmp/tmp.tAG0lu8ktF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete perconapgcluster/minimal + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.17PEMvuAUr perconapgcluster.pg.percona.com "minimal" deleted + cat /tmp/tmp.tAG0lu8ktF + rm /tmp/tmp.17PEMvuAUr /tmp/tmp.tAG0lu8ktF + return 0 ++ kubectl_bin -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ykf1pPcKVA +++ mktemp ++ local LAST_ERR=/tmp/tmp.x8RlyVvKXP ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ykf1pPcKVA ++ cat /tmp/tmp.x8RlyVvKXP ++ rm /tmp/tmp.ykf1pPcKVA /tmp/tmp.x8RlyVvKXP ++ return 0 + wait_job_completion minimal-rmdata-cdfz + local job=minimal-rmdata-cdfz + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' minimal-rmdata-cdfz+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
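
Teardown between passes follows the same pattern each time: delete the perconapgcluster, look up the rmdata job the operator creates, and wait for it to finish. A short sketch of that sequence, assuming wait_job_completion behaves as outlined earlier; the label selector and namespace are the ones from this run.

# Sketch: remove the test cluster and wait for the operator's data-removal job.
kubectl delete perconapgcluster/minimal

rmdata_job=$(kubectl -n version-service-31598 get job \
    --selector=pg-cluster=minimal,pgrmdata=true \
    -o 'jsonpath={.items[0].metadata.name}')

wait_job_completion "${rmdata_job}" version-service-31598
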
.+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-cdfz -o 'jsonpath={.metadata.name}' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + check_telemetry_transfer http://version-service-cr:11000 recommended disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=recommended + local telemetry_state=disabled + local cluster=minimal + yq w - spec.upgradeOptions.versionServiceEndpoint http://version-service-cr:11000 + yq w - spec.upgradeOptions.apply recommended + yq r /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr-minimal.yaml + spinup_pgcluster minimal /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.recommended.yaml + local cluster=minimal + local config=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.recommended.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets minimal version-service-31598 + local cluster=minimal + local namespace=version-service-31598 + yq w - metadata.name minimal-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jw80LFUvEz ++ mktemp + local LAST_ERR=/tmp/tmp.xJRMmg4gpY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 
2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.jw80LFUvEz secret/minimal-users-secret created + cat /tmp/tmp.xJRMmg4gpY + rm /tmp/tmp.jw80LFUvEz /tmp/tmp.xJRMmg4gpY + return 0 + yq w - metadata.name minimal-pmm-secret + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.pJySUmG4YF ++ mktemp + local LAST_ERR=/tmp/tmp.lEFLs9BuxC + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.pJySUmG4YF secret/minimal-pmm-secret configured + cat /tmp/tmp.lEFLs9BuxC + rm /tmp/tmp.pJySUmG4YF /tmp/tmp.lEFLs9BuxC + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9ZPj1XZpGt ++ mktemp + local LAST_ERR=/tmp/tmp.g5GvHclZgd + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9ZPj1XZpGt deployment.apps/pg-client unchanged + cat /tmp/tmp.g5GvHclZgd + rm /tmp/tmp.9ZPj1XZpGt /tmp/tmp.g5GvHclZgd + return 0 + apply_cluster /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.recommended.yaml minimal false false false false false false false false + local path=/tmp/tmp.ZAvsDSImr9/cr.minimal.apply.recommended.yaml + local name=minimal + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /tmp/tmp.ZAvsDSImr9/cr.minimal.apply.recommended.yaml metadata.annotations.current-primary minimal + yq w - metadata.name minimal + yq w - spec.clustername minimal + yq w - spec.database minimal + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.secretsName minimal-users-secret + yq w - metadata.labels.name minimal + yq w - metadata.labels.pg-cluster minimal + yq w - spec.user some-name + yq w - spec.keepBackups false + yq w - spec.name minimal + yq w - spec.keepData false + yq w - metadata.labels.deployment-name minimal + yq w - metadata.labels.crunchy-pgha-scope minimal + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.1WjKAW9w3H ++ mktemp + local LAST_ERR=/tmp/tmp.qsXOZkxH6q + local exit_status=0 + local timeout=4 + set +o errexit ++ 
seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1WjKAW9w3H perconapgcluster.pg.percona.com/minimal created + cat /tmp/tmp.qsXOZkxH6q + rm /tmp/tmp.1WjKAW9w3H /tmp/tmp.qsXOZkxH6q + return 0 + wait_deployment minimal-backrest-shared-repo + local name=minimal-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal-backrest-shared-repo. + wait_deployment minimal + local name=minimal + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace minimal...... + [[ false == \g\c\s\+* ]] + wait_job_completion minimal-stanza-create false + local job=minimal-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' minimal-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-minimal + local job=backrest-backup-minimal + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' backrest-backup-minimal+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
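
The wait_deployment calls in this step hide their loop behind set +o xtrace, so only the dots are visible in the log. The sketch below is a hypothetical stand-in that polls deployment readiness; the real helper may work differently, and only the initial sleep 10 and the dotted progress output are confirmed by the trace.

# Hypothetical wait_deployment: poll until readyReplicas matches spec.replicas
# (an assumption; the actual loop is not shown in the trace).
wait_deployment() {
    local name=$1
    local ns=${2:-version-service-31598}
    local ready want
    sleep 10
    while true; do
        ready=$(kubectl -n "${ns}" get "deployment/${name}" \
            -o 'jsonpath={.status.readyReplicas}')
        want=$(kubectl -n "${ns}" get "deployment/${name}" \
            -o 'jsonpath={.spec.replicas}')
        [[ -n "${want}" && "${ready}" == "${want}" ]] && break
        echo -n .
        sleep 1
    done
    echo
}
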
.+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-minimal -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency minimal + cluster_name=minimal + wait_cluster_status minimal 'pgcluster Initialized' + local cluster_name=minimal + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7GnHxZ4zBU +++ mktemp ++ local LAST_ERR=/tmp/tmp.On1TR3XFvS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.7GnHxZ4zBU ++ cat /tmp/tmp.On1TR3XFvS ++ rm /tmp/tmp.7GnHxZ4zBU /tmp/tmp.On1TR3XFvS ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UmzWSm3i5W ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mE4TjzVQ12 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UmzWSm3i5W +++ cat /tmp/tmp.mE4TjzVQ12 +++ rm /tmp/tmp.UmzWSm3i5W /tmp/tmp.mE4TjzVQ12 +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GAJwrRWDFx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CygWKb7Nqa +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GAJwrRWDFx +++ cat /tmp/tmp.CygWKb7Nqa +++ rm /tmp/tmp.GAJwrRWDFx /tmp/tmp.CygWKb7Nqa +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.zzi4mR1Uz6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ch6Ii0rmA2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.zzi4mR1Uz6 +++ cat /tmp/tmp.Ch6Ii0rmA2 +++ rm /tmp/tmp.zzi4mR1Uz6 /tmp/tmp.Ch6Ii0rmA2 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6GhUdtVOeQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2FavhEcmMl +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6GhUdtVOeQ +++ cat /tmp/tmp.2FavhEcmMl +++ rm /tmp/tmp.6GhUdtVOeQ /tmp/tmp.2FavhEcmMl +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6H5mHsZuUe +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FVOMzbMGx0 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.6H5mHsZuUe ++++ cat /tmp/tmp.FVOMzbMGx0 ++++ rm /tmp/tmp.6H5mHsZuUe /tmp/tmp.FVOMzbMGx0 ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qebRzBd5R7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TgQLGFIB0y +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qebRzBd5R7 +++ cat /tmp/tmp.TgQLGFIB0y +++ rm /tmp/tmp.qebRzBd5R7 /tmp/tmp.TgQLGFIB0y +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@minimal.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9d3cn1OSa6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cu2y2HXZsd ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.9d3cn1OSa6 ++ cat /tmp/tmp.cu2y2HXZsd ++ rm 
/tmp/tmp.9d3cn1OSa6 /tmp/tmp.cu2y2HXZsd ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.kKs8GeJNu4 ++ mktemp + local LAST_ERR=/tmp/tmp.sR05inEKqF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.kKs8GeJNu4 + cat /tmp/tmp.sR05inEKqF + rm /tmp/tmp.kKs8GeJNu4 /tmp/tmp.sR05inEKqF + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H27BEEEYj6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Pri7XEayJg +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.H27BEEEYj6 +++ cat /tmp/tmp.Pri7XEayJg +++ rm /tmp/tmp.H27BEEEYj6 /tmp/tmp.Pri7XEayJg +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uTeIPEHKZJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UrFQ91lqrV +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.uTeIPEHKZJ +++ cat /tmp/tmp.UrFQ91lqrV +++ rm /tmp/tmp.uTeIPEHKZJ /tmp/tmp.UrFQ91lqrV +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IGk1TypbGX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hYyyd0Y5K0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IGk1TypbGX +++ cat /tmp/tmp.hYyyd0Y5K0 +++ rm /tmp/tmp.IGk1TypbGX /tmp/tmp.hYyyd0Y5K0 +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hanR6ShDwn +++++ mktemp ++++ local 
LAST_ERR=/tmp/tmp.V3RUrlB3pt ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.hanR6ShDwn ++++ cat /tmp/tmp.V3RUrlB3pt ++++ rm /tmp/tmp.hanR6ShDwn /tmp/tmp.V3RUrlB3pt ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R56BdU4bZJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AaRUVZkfL7 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.R56BdU4bZJ +++ cat /tmp/tmp.AaRUVZkfL7 +++ rm /tmp/tmp.R56BdU4bZJ /tmp/tmp.AaRUVZkfL7 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNfJNI8aw0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vwepiu7yPg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.aNfJNI8aw0 ++ cat /tmp/tmp.Vwepiu7yPg ++ rm /tmp/tmp.aNfJNI8aw0 /tmp/tmp.Vwepiu7yPg ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.x8VqGyqPkx ++ mktemp + local LAST_ERR=/tmp/tmp.hJZ69wrqc1 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.x8VqGyqPkx + cat /tmp/tmp.hJZ69wrqc1 + rm /tmp/tmp.x8VqGyqPkx /tmp/tmp.hJZ69wrqc1 + return 0 ++ get_psql_user_pass postgres minimal ++ local user=postgres ++ local cluster=minimal ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2jtbx8rb6s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HT8xvA3DzX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 
2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.2jtbx8rb6s +++ cat /tmp/tmp.HT8xvA3DzX +++ rm /tmp/tmp.2jtbx8rb6s /tmp/tmp.HT8xvA3DzX +++ return 0 ++ [[ xminimal == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2SoQLMuKmd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VZ8NDHhC5p +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/minimal -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.2SoQLMuKmd +++ cat /tmp/tmp.VZ8NDHhC5p +++ rm /tmp/tmp.2SoQLMuKmd /tmp/tmp.VZ8NDHhC5p +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QSwSbVaX5G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rjkaE5Jae3 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QSwSbVaX5G +++ cat /tmp/tmp.rjkaE5Jae3 +++ rm /tmp/tmp.QSwSbVaX5G /tmp/tmp.rjkaE5Jae3 +++ return 0 ++ [[ xminimal-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mOFvmaIk08 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Gty34GnfDP ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/minimal -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.mOFvmaIk08 ++++ cat /tmp/tmp.Gty34GnfDP ++++ rm /tmp/tmp.mOFvmaIk08 /tmp/tmp.Gty34GnfDP ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fKE4Tkut8i ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gVkAZP0L1C +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/minimal-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fKE4Tkut8i +++ cat /tmp/tmp.gVkAZP0L1C +++ rm /tmp/tmp.fKE4Tkut8i /tmp/tmp.gVkAZP0L1C +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@minimal.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@minimal.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ 
kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jQDNlnMVZO +++ mktemp ++ local LAST_ERR=/tmp/tmp.fXYIwz1lu0 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jQDNlnMVZO ++ cat /tmp/tmp.fXYIwz1lu0 ++ rm /tmp/tmp.jQDNlnMVZO /tmp/tmp.fXYIwz1lu0 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.yAdxTJtxgA ++ mktemp + local LAST_ERR=/tmp/tmp.xk42Fd2Qjm + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@minimal.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yAdxTJtxgA + cat /tmp/tmp.xk42Fd2Qjm + rm /tmp/tmp.yAdxTJtxgA /tmp/tmp.xk42Fd2Qjm + return 0 + sleep 10 + grep -E 'server request payload|unary call' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + grep -Eo '\{.*\}' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.request.content".msg.customResourceUid)' + kubectl_bin logs version-service-cr-76cc4777bf-v5rk5 ++ mktemp + local LAST_OUT=/tmp/tmp.9l91xjZZqm ++ mktemp + local LAST_ERR=/tmp/tmp.QAnB2MyEQc + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-cr-76cc4777bf-v5rk5 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9l91xjZZqm + cat /tmp/tmp.QAnB2MyEQc + rm /tmp/tmp.9l91xjZZqm /tmp/tmp.QAnB2MyEQc + return 0 ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' + jq 'del(."grpc.start_time")' + grep -Eo '\{.*\}' + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-5cd4db57-x6px5 ++ mktemp + local LAST_OUT=/tmp/tmp.4MOXtb6j0Z ++ mktemp + local LAST_ERR=/tmp/tmp.GdrUz951PE + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs version-service-5cd4db57-x6px5 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4MOXtb6j0Z + cat /tmp/tmp.GdrUz951PE + rm /tmp/tmp.4MOXtb6j0Z /tmp/tmp.GdrUz951PE + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/disabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/disabled_telemetry.version-service.log.json + [[ disabled == \d\i\s\a\b\l\e\d 
]] + [[ recommended == \r\e\c\o\m\m\e\n\d\e\d ]] + diff -u /tmp/tmp.ZAvsDSImr9/enabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/enabled_telemetry.version-service-cr.log.json + kubectl_bin delete perconapgcluster/minimal ++ mktemp + local LAST_OUT=/tmp/tmp.IPW0ZoI9Xi ++ mktemp + local LAST_ERR=/tmp/tmp.ShDRneziiT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete perconapgcluster/minimal + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IPW0ZoI9Xi perconapgcluster.pg.percona.com "minimal" deleted + cat /tmp/tmp.ShDRneziiT + rm /tmp/tmp.IPW0ZoI9Xi /tmp/tmp.ShDRneziiT + return 0 ++ kubectl_bin -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YrFmtlbYeJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.WpqDn7wWmZ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get job --selector=pg-cluster=minimal,pgrmdata=true -o 'jsonpath={.items[0].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.YrFmtlbYeJ ++ cat /tmp/tmp.WpqDn7wWmZ ++ rm /tmp/tmp.YrFmtlbYeJ /tmp/tmp.WpqDn7wWmZ ++ return 0 + wait_job_completion minimal-rmdata-ozhz + local job=minimal-rmdata-ozhz + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' minimal-rmdata-ozhz+ retry=0 ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/minimal-rmdata-ozhz -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + kubectl_bin -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"replace","path":"/spec/template/spec/containers/1/env/7","value":{"name":"DISABLE_TELEMETRY","value":"false"}}]' ++ mktemp + local LAST_OUT=/tmp/tmp.ZmaPJX34QA ++ mktemp + local LAST_ERR=/tmp/tmp.ZxepWki3t0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 patch deployment/postgres-operator --type json '-p=[{"op":"replace","path":"/spec/template/spec/containers/1/env/7","value":{"name":"DISABLE_TELEMETRY","value":"false"}}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZmaPJX34QA deployment.apps/postgres-operator patched + cat /tmp/tmp.ZxepWki3t0 + rm /tmp/tmp.ZmaPJX34QA /tmp/tmp.ZxepWki3t0 + return 0 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace postgres-operator... 
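The telemetry re-enable step traced above reduces to a single JSON patch on the operator deployment followed by a readiness wait. A minimal sketch, assuming the same namespace as in this trace and that DISABLE_TELEMETRY sits at env index 7 of the second container (as it does here); the kubectl_bin retry wrapper and the wait_deployment helper are simplified to plain kubectl calls, with kubectl rollout status used only as a stand-in for the helper's own polling:

# Sketch: re-enable operator telemetry and wait for the rollout to settle.
# NAMESPACE and the env index below are taken from this trace; verify both before reusing elsewhere.
NAMESPACE=version-service-31598
kubectl -n "${NAMESPACE}" patch deployment/postgres-operator --type json \
  -p='[{"op":"replace","path":"/spec/template/spec/containers/1/env/7","value":{"name":"DISABLE_TELEMETRY","value":"false"}}]'
# Stand-in for wait_deployment (its implementation is not shown in this excerpt).
kubectl -n "${NAMESPACE}" rollout status deployment/postgres-operator --timeout=120s

With telemetry back on, the test moves to the recommended-channel scenario below, where the version service is expected to pick the image set for the requested channel.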
+ desc 'Use simple recommended channel' + set +o xtrace ----------------------------------------------------------------------------------- Use simple recommended channel ----------------------------------------------------------------------------------- + cluster=cluster1 + PGO_TAG=1.5.1 + PG_VER=13 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.eyJH5ul2DR ++ mktemp + local LAST_ERR=/tmp/tmp.iW9X4B4r7c + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eyJH5ul2DR secret/cluster1-users-secret created + cat /tmp/tmp.iW9X4B4r7c + rm /tmp/tmp.eyJH5ul2DR /tmp/tmp.iW9X4B4r7c + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PwzNbXOFez ++ mktemp + local LAST_ERR=/tmp/tmp.USi47owTFv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.PwzNbXOFez secret/cluster1-pmm-secret created + cat /tmp/tmp.USi47owTFv + rm /tmp/tmp.PwzNbXOFez /tmp/tmp.USi47owTFv + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DBpySC0Z8w ++ mktemp + local LAST_ERR=/tmp/tmp.86t8VRceGq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat 
/tmp/tmp.DBpySC0Z8w deployment.apps/pg-client unchanged + cat /tmp/tmp.86t8VRceGq + rm /tmp/tmp.DBpySC0Z8w /tmp/tmp.86t8VRceGq + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - spec.clustername cluster1 + yq w - metadata.name cluster1 + yq w - spec.user some-name + yq w - metadata.labels.pg-cluster cluster1 + yq w - spec.secretsName cluster1-users-secret + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + yq w - spec.keepBackups false + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + yq w - spec.keepData false + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - metadata.labels.name cluster1 + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + yq w - spec.name cluster1 + yq w - spec.database cluster1 + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.xd3iIv9YSc ++ mktemp + local LAST_ERR=/tmp/tmp.niyruJqSiI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.xd3iIv9YSc perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.niyruJqSiI + rm /tmp/tmp.xd3iIv9YSc /tmp/tmp.niyruJqSiI + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo..... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1............... + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' cluster1-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z9c56IYvKp +++ mktemp ++ local LAST_ERR=/tmp/tmp.MtghkuGPtb ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.z9c56IYvKp ++ cat /tmp/tmp.MtghkuGPtb ++ rm /tmp/tmp.z9c56IYvKp /tmp/tmp.MtghkuGPtb ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.N9KmESlEew ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FnjclFCpn3 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.N9KmESlEew +++ cat /tmp/tmp.FnjclFCpn3 +++ rm /tmp/tmp.N9KmESlEew /tmp/tmp.FnjclFCpn3 +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j7PLAvhJvT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rObZYMxvyz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.j7PLAvhJvT +++ cat /tmp/tmp.rObZYMxvyz +++ rm /tmp/tmp.j7PLAvhJvT /tmp/tmp.rObZYMxvyz +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local 
user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WDHfVQ2Wph ++++ mktemp +++ local LAST_ERR=/tmp/tmp.h5V7Tewowt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.WDHfVQ2Wph +++ cat /tmp/tmp.h5V7Tewowt +++ rm /tmp/tmp.WDHfVQ2Wph /tmp/tmp.h5V7Tewowt +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0vMSort9wa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PPNtHWR8iV +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0vMSort9wa +++ cat /tmp/tmp.PPNtHWR8iV +++ rm /tmp/tmp.0vMSort9wa /tmp/tmp.PPNtHWR8iV +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vXK2KuqEoC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LMquZZCZIS +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.vXK2KuqEoC +++ cat /tmp/tmp.LMquZZCZIS +++ rm /tmp/tmp.vXK2KuqEoC /tmp/tmp.LMquZZCZIS +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.YYzjKj3GO2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NvaMDbGI18 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.YYzjKj3GO2 ++++ cat /tmp/tmp.NvaMDbGI18 ++++ rm /tmp/tmp.YYzjKj3GO2 /tmp/tmp.NvaMDbGI18 ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XeGOJxnZDE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CxMrqoS0iv +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.XeGOJxnZDE +++ cat /tmp/tmp.CxMrqoS0iv +++ rm /tmp/tmp.XeGOJxnZDE /tmp/tmp.CxMrqoS0iv +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE 
myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yZ3PMigvMv +++ mktemp ++ local LAST_ERR=/tmp/tmp.s8Ndf400to ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.yZ3PMigvMv ++ cat /tmp/tmp.s8Ndf400to ++ rm /tmp/tmp.yZ3PMigvMv /tmp/tmp.s8Ndf400to ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.f4ZV6yso0C ++ mktemp + local LAST_ERR=/tmp/tmp.gNEQLZuLc6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.f4ZV6yso0C + cat /tmp/tmp.gNEQLZuLc6 + rm /tmp/tmp.f4ZV6yso0C /tmp/tmp.gNEQLZuLc6 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eAipkAkoEj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LeWmBG7A4C +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.eAipkAkoEj +++ cat /tmp/tmp.LeWmBG7A4C +++ rm /tmp/tmp.eAipkAkoEj /tmp/tmp.LeWmBG7A4C +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aAtZUzSbrI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9it4qyYM7h +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.aAtZUzSbrI +++ cat 
/tmp/tmp.9it4qyYM7h +++ rm /tmp/tmp.aAtZUzSbrI /tmp/tmp.9it4qyYM7h +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2okFqc0wIq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZQRRJ71E9W +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.2okFqc0wIq +++ cat /tmp/tmp.ZQRRJ71E9W +++ rm /tmp/tmp.2okFqc0wIq /tmp/tmp.ZQRRJ71E9W +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Y2LFkcGvu4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6cqkOAJvVD ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.Y2LFkcGvu4 ++++ cat /tmp/tmp.6cqkOAJvVD ++++ rm /tmp/tmp.Y2LFkcGvu4 /tmp/tmp.6cqkOAJvVD ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.A8ehTgUR2Q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Rb1deIPSqv +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.A8ehTgUR2Q +++ cat /tmp/tmp.Rb1deIPSqv +++ rm /tmp/tmp.A8ehTgUR2Q /tmp/tmp.Rb1deIPSqv +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6GSgGFcrs0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dmljfWiibG ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6GSgGFcrs0 ++ cat /tmp/tmp.dmljfWiibG ++ rm /tmp/tmp.6GSgGFcrs0 /tmp/tmp.dmljfWiibG ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.zNek2rMgpV ++ mktemp + local LAST_ERR=/tmp/tmp.A2q6lUU1UR + local exit_status=0 + local timeout=4 + 
set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zNek2rMgpV + cat /tmp/tmp.A2q6lUU1UR + rm /tmp/tmp.zNek2rMgpV /tmp/tmp.A2q6lUU1UR + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1Kr3fpoPXR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kewKfzRbkp +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1Kr3fpoPXR +++ cat /tmp/tmp.kewKfzRbkp +++ rm /tmp/tmp.1Kr3fpoPXR /tmp/tmp.kewKfzRbkp +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VMz1JhLVXY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Zys2g4bVQ6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.VMz1JhLVXY +++ cat /tmp/tmp.Zys2g4bVQ6 +++ rm /tmp/tmp.VMz1JhLVXY /tmp/tmp.Zys2g4bVQ6 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SzXgnxcrl9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F152pYk0ME +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.SzXgnxcrl9 +++ cat /tmp/tmp.F152pYk0ME +++ rm /tmp/tmp.SzXgnxcrl9 /tmp/tmp.F152pYk0ME +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0L2yinmvyb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yKu3KCg8fF ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.0L2yinmvyb ++++ cat /tmp/tmp.yKu3KCg8fF ++++ rm /tmp/tmp.0L2yinmvyb /tmp/tmp.yKu3KCg8fF ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp 
+++ local LAST_OUT=/tmp/tmp.jOL0PL03cD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.raYAnLJ3z4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jOL0PL03cD +++ cat /tmp/tmp.raYAnLJ3z4 +++ rm /tmp/tmp.jOL0PL03cD /tmp/tmp.raYAnLJ3z4 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LK3b6R6QAS +++ mktemp ++ local LAST_ERR=/tmp/tmp.8NQRD6CZ34 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.LK3b6R6QAS ++ cat /tmp/tmp.8NQRD6CZ34 ++ rm /tmp/tmp.LK3b6R6QAS /tmp/tmp.8NQRD6CZ34 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.7EBhiOeoHv ++ mktemp + local LAST_ERR=/tmp/tmp.bbiVxfBhrm + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.7EBhiOeoHv + cat /tmp/tmp.bbiVxfBhrm + rm /tmp/tmp.7EBhiOeoHv /tmp/tmp.bbiVxfBhrm + return 0 + sleep 10 + apply_version cluster1 9.9.9 recommended 13 http://version-service:11000 0 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=recommended + local db_version=13 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=0 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.K75s9xOS7g ++ mktemp + local LAST_ERR=/tmp/tmp.NekBIXKk7Y + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.K75s9xOS7g perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.NekBIXKk7Y + rm /tmp/tmp.K75s9xOS7g /tmp/tmp.NekBIXKk7Y + return 0 + sleep 10 + wait_smart_update cluster1 0 + local cluster=cluster1 + local target=0 + set +o xtrace ............................................................................. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1 + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.XJ9tzewQVn ++ mktemp + local LAST_ERR=/tmp/tmp.4sTEyYsk0F + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XJ9tzewQVn job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.4sTEyYsk0F + rm /tmp/tmp.XJ9tzewQVn /tmp/tmp.4sTEyYsk0F + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - metadata.name fulldata-cluster1 + yq w - spec.namespace version-service-31598 + yq w - spec.parameters.job-name fulldata-cluster1 + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.name fulldata-cluster1 + kubectl_bin apply -f - ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ mktemp +++ mktemp + yq w --style=single -- - spec.parameters.backrest-opts --type=full + local LAST_OUT=/tmp/tmp.LaSCm6GGJK ++ mktemp ++ local LAST_OUT=/tmp/tmp.mH2RZNprrd +++ mktemp + local LAST_ERR=/tmp/tmp.uxmSd4jHeE + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.9M2tuXJEW8 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.mH2RZNprrd ++ cat /tmp/tmp.9M2tuXJEW8 ++ rm /tmp/tmp.mH2RZNprrd /tmp/tmp.9M2tuXJEW8 ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-57f668d478-2jmng + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LaSCm6GGJK pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.uxmSd4jHeE + rm /tmp/tmp.LaSCm6GGJK /tmp/tmp.uxmSd4jHeE + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 
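
What triggers the smart update above is a single JSON patch against the perconapgcluster resource: spec.upgradeOptions points the operator at the test's in-cluster version service with the recommended channel, while the pgo-version label and userLabel are dropped to a fake 9.9.9 so the operator has something to upgrade from. A hedged sketch of just that call, with the endpoint, namespace and values copied from this run (the explicit -n flag is an assumption; the trace relies on the current context):

# Sketch: ask the operator to consult the version service and apply the "recommended"
# release for cluster1. wait_smart_update / wait_deployment in the trace then block
# until the rollout settles before the images are verified.
kubectl -n version-service-31598 patch perconapgcluster/cluster1 --type json -p '[
  {"op":"add","path":"/spec/upgradeOptions",
   "value":{"versionServiceEndpoint":"http://version-service:11000","apply":"recommended"}},
  {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"},
  {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"}
]'

The on-demand full backup requested right after the update is the same idea in reverse: a pgtask manifest is rendered from e2e-tests/conf/backup.yml with yq and applied, and the resulting Job is then polled as shown below.
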
'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
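
The dotted wait above is the generic Job poll the suite uses everywhere: confirm the Job object exists, then re-read .status.succeeded once per second until it reports 1 or a retry budget of 600 is exhausted. Below is a simplified sketch of that loop under the names from this run; the non-mandatory variant seen later for cluster1-stanza-create gives up after 120 tries and prints "Can not detect job. Passing by." instead of failing.

# Sketch of the wait_job_completion idea: poll a Job until it reports success,
# or give up after ~600 one-second attempts.
wait_job() {
  local job=$1 ns=$2 retry=0
  until [[ $(kubectl -n "$ns" get "job/$job" -o jsonpath='{.status.succeeded}' 2>/dev/null) == 1 ]]; do
    if ! kubectl -n "$ns" get "job/$job" >/dev/null 2>&1; then
      echo "job $job not found" >&2
      return 1
    fi
    retry=$((retry + 1))
    if (( retry >= 600 )); then
      echo "timed out waiting for job $job" >&2
      return 1
    fi
    sleep 1
    echo -n .
  done
  echo
}

wait_job fulldata-cluster1 version-service-31598
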
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 recommended 13 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=recommended + local db_version=13 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.YVz77ybkNr ++ mktemp + local LAST_ERR=/tmp/tmp.A7C7pRf6Po + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YVz77ybkNr + cat /tmp/tmp.A7C7pRf6Po + rm /tmp/tmp.YVz77ybkNr /tmp/tmp.A7C7pRf6Po + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-recommended-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/13.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ..Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'Use major version recommended channel' + set +o xtrace ----------------------------------------------------------------------------------- Use major version recommended channel ----------------------------------------------------------------------------------- + PG_VER=12 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 
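
verify_applied_version, run just above, boils down to listing every container image the operator deployed for the cluster and diffing that list against a checked-in expectation for the requested Postgres major version. A sketch of the check, reusing the selector, jsonpath and compare file from the trace; the scratch output path is illustrative.

# Sketch: collect the unique container images behind cluster1 and compare them with
# the expected image list for this PG major version. A non-empty diff fails the test.
kubectl -n version-service-31598 get deployments,jobs --selector=pg-cluster=cluster1 \
  -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' \
  | sort -s -u > /tmp/deployed-images.txt

diff -u /tmp/deployed-images.txt \
  /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/13.txt
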
/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EdZxXmr2vg ++ mktemp + local LAST_ERR=/tmp/tmp.ViY0pgn48j + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.EdZxXmr2vg secret/cluster1-users-secret created + cat /tmp/tmp.ViY0pgn48j + rm /tmp/tmp.EdZxXmr2vg /tmp/tmp.ViY0pgn48j + return 0 + yq w - metadata.name cluster1-pmm-secret + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Sf1gLgUrTc ++ mktemp + local LAST_ERR=/tmp/tmp.dqMCU2atqS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Sf1gLgUrTc secret/cluster1-pmm-secret configured + cat /tmp/tmp.dqMCU2atqS + rm /tmp/tmp.Sf1gLgUrTc /tmp/tmp.dqMCU2atqS + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ZnSReM6bfP ++ mktemp + local LAST_ERR=/tmp/tmp.dJnbPtkoC0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZnSReM6bfP deployment.apps/pg-client unchanged + cat /tmp/tmp.dJnbPtkoC0 + rm /tmp/tmp.ZnSReM6bfP /tmp/tmp.dJnbPtkoC0 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - spec.name cluster1 + yq w - spec.clustername cluster1 + yq w - spec.database cluster1 + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + yq w - spec.keepData false + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + yq w - spec.secretsName cluster1-users-secret + yq w - metadata.labels.name cluster1 + yq w - spec.keepBackups false + yq w - 
metadata.labels.deployment-name cluster1 + yq w - metadata.name cluster1 + yq w - spec.user some-name + yq w - metadata.labels.pg-cluster cluster1 + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.AcJmkgon44 ++ mktemp + local LAST_ERR=/tmp/tmp.MiB3MrG7Bw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.AcJmkgon44 perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.MiB3MrG7Bw + rm /tmp/tmp.AcJmkgon44 /tmp/tmp.MiB3MrG7Bw + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo..... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1................ + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 6 -ge 120 ]] + [[ 6 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 7 -ge 120 ]] + [[ 7 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 8 -ge 120 ]] + [[ 8 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
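
The fresh 1.5.1 cluster above is rendered the way every cluster in this suite is: the users secret is split out of the multi-document deploy/users-secret.yaml with yq r -d0 / -d1 and renamed for the cluster, then the versioned CR template has its name-dependent fields and image tags overwritten with yq (the v3 "yq w" syntax used throughout this log) before being applied. A condensed sketch with the values this run used; the scratch path is illustrative and only a subset of the fields the trace sets is shown.

# Sketch: template the 1.5.1 CR for cluster1 and apply it (yq v3 write syntax, as in the trace).
SRC=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml
yq w "$SRC" metadata.name cluster1 \
  | yq w - metadata.annotations.current-primary cluster1 \
  | yq w - metadata.labels.pg-cluster cluster1 \
  | yq w - spec.clustername cluster1 \
  | yq w - spec.database cluster1 \
  | yq w - spec.user some-name \
  | yq w - spec.secretsName cluster1-users-secret \
  | yq w - spec.keepData false \
  | yq w - spec.keepBackups false \
  | yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha \
  | yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer \
  | yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest \
  | yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo \
  | yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger \
  > /tmp/cluster1-cr.yaml

kubectl -n version-service-31598 apply -f /tmp/cluster1-cr.yaml
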
.+ let retry+=1 + [[ 9 -ge 120 ]] + [[ 9 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 10 -ge 120 ]] + [[ 10 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 11 -ge 120 ]] + [[ 11 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 12 -ge 120 ]] + [[ 12 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 13 -ge 120 ]] + [[ 13 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 14 -ge 120 ]] + [[ 14 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 15 -ge 120 ]] + [[ 15 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 16 -ge 120 ]] + [[ 16 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 17 -ge 120 ]] + [[ 17 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 18 -ge 120 ]] + [[ 18 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 19 -ge 120 ]] + [[ 19 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 20 -ge 120 ]] + [[ 20 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 21 -ge 120 ]] + [[ 21 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 22 -ge 120 ]] + [[ 22 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 23 -ge 120 ]] + [[ 23 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 99 -ge 120 ]] + [[ 99 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 100 -ge 120 ]] + [[ 100 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 101 -ge 120 ]] + [[ 101 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 102 -ge 120 ]] + [[ 102 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 103 -ge 120 ]] + [[ 103 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 104 -ge 120 ]] + [[ 104 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 105 -ge 120 ]] + [[ 105 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 106 -ge 120 ]] + [[ 106 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 107 -ge 120 ]] + [[ 107 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 108 -ge 120 ]] + [[ 108 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 109 -ge 120 ]] + [[ 109 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 110 -ge 120 ]] + [[ 110 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 111 -ge 120 ]] + [[ 111 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 112 -ge 120 ]] + [[ 112 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 113 -ge 120 ]] + [[ 113 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 114 -ge 120 ]] + [[ 114 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 115 -ge 120 ]] + [[ 115 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 116 -ge 120 ]] + [[ 116 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 117 -ge 120 ]] + [[ 117 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 118 -ge 120 ]] + [[ 118 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 119 -ge 120 ]] + [[ 119 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. + set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ynS5UAb92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jenbtSyTf1 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.8ynS5UAb92 ++ cat /tmp/tmp.jenbtSyTf1 ++ rm /tmp/tmp.8ynS5UAb92 /tmp/tmp.jenbtSyTf1 ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 
'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S3shX4es3f ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DBBWDnN4vP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.S3shX4es3f +++ cat /tmp/tmp.DBBWDnN4vP +++ rm /tmp/tmp.S3shX4es3f /tmp/tmp.DBBWDnN4vP +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Dojr4It03F ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wr1ECjytlp +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Dojr4It03F +++ cat /tmp/tmp.wr1ECjytlp +++ rm /tmp/tmp.Dojr4It03F /tmp/tmp.wr1ECjytlp +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hzVkmY95wD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wmBS57LpAK +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.hzVkmY95wD +++ cat /tmp/tmp.wmBS57LpAK +++ rm /tmp/tmp.hzVkmY95wD /tmp/tmp.wmBS57LpAK +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iOZijukQaB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FPSGduKfyG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iOZijukQaB +++ cat /tmp/tmp.FPSGduKfyG +++ rm /tmp/tmp.iOZijukQaB /tmp/tmp.FPSGduKfyG +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bELtdWIW0A +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ck9SMwsY5l ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get 
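
The readiness gate before writing data is just a field comparison on the CR status: because the cluster carries a 1.x pgo-version label, the helper reads .status.PGCluster.state and loops until it equals "pgcluster Initialized". A stripped-down version of that wait, reusing the names from this run; the polling interval is illustrative, and the real wait_cluster_status also tracks a retry counter (visible above as retry=0).

# Sketch: block until the perconapgcluster reports the Initialized state.
until [[ $(kubectl -n version-service-31598 get perconapgcluster/cluster1 \
             -o jsonpath='{.status.PGCluster.state}') == 'pgcluster Initialized' ]]; do
  sleep 10
done
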
perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.bELtdWIW0A ++++ cat /tmp/tmp.Ck9SMwsY5l ++++ rm /tmp/tmp.bELtdWIW0A /tmp/tmp.Ck9SMwsY5l ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FREjXfLkfv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ie4wiTUCc1 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.FREjXfLkfv +++ cat /tmp/tmp.Ie4wiTUCc1 +++ rm /tmp/tmp.FREjXfLkfv /tmp/tmp.Ie4wiTUCc1 +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0oN0Pij8q0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qp2n8T9vNM ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.0oN0Pij8q0 ++ cat /tmp/tmp.qp2n8T9vNM ++ rm /tmp/tmp.0oN0Pij8q0 /tmp/tmp.qp2n8T9vNM ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.o5dBihnMqN ++ mktemp + local LAST_ERR=/tmp/tmp.gWXg24C4RN + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.o5dBihnMqN + cat /tmp/tmp.gWXg24C4RN + rm /tmp/tmp.o5dBihnMqN /tmp/tmp.gWXg24C4RN + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xJRyarnBlP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RNEuE0LTbm +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl 
-n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.xJRyarnBlP +++ cat /tmp/tmp.RNEuE0LTbm +++ rm /tmp/tmp.xJRyarnBlP /tmp/tmp.RNEuE0LTbm +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rKRzX2aMXk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hUxjB5sY1h +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.rKRzX2aMXk +++ cat /tmp/tmp.hUxjB5sY1h +++ rm /tmp/tmp.rKRzX2aMXk /tmp/tmp.hUxjB5sY1h +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m71I3E4iRe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5yYZuClClL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.m71I3E4iRe +++ cat /tmp/tmp.5yYZuClClL +++ rm /tmp/tmp.m71I3E4iRe /tmp/tmp.5yYZuClClL +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lURJ83jEJn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iUN8r7gRvg ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.lURJ83jEJn ++++ cat /tmp/tmp.iUN8r7gRvg ++++ rm /tmp/tmp.lURJ83jEJn /tmp/tmp.iUN8r7gRvg ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LhffWoPVxJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Yb02tAXDTU +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.LhffWoPVxJ +++ cat /tmp/tmp.Yb02tAXDTU +++ rm /tmp/tmp.LhffWoPVxJ /tmp/tmp.Yb02tAXDTU +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fe6zAMELUw +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.WMYroDah2v ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.fe6zAMELUw ++ cat /tmp/tmp.WMYroDah2v ++ rm /tmp/tmp.fe6zAMELUw /tmp/tmp.WMYroDah2v ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.0qQDN4kwAo ++ mktemp + local LAST_ERR=/tmp/tmp.DKDObtpeG3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0qQDN4kwAo + cat /tmp/tmp.DKDObtpeG3 + rm /tmp/tmp.0qQDN4kwAo /tmp/tmp.DKDObtpeG3 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GfpiDmvflg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rEOqmELx8u +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GfpiDmvflg +++ cat /tmp/tmp.rEOqmELx8u +++ rm /tmp/tmp.GfpiDmvflg /tmp/tmp.rEOqmELx8u +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0DapS7T7IK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WNP1C7XiAf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0DapS7T7IK +++ cat /tmp/tmp.WNP1C7XiAf +++ rm /tmp/tmp.0DapS7T7IK /tmp/tmp.WNP1C7XiAf +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nUCRe2PT7A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TXnxaV4fYW +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat 
/tmp/tmp.nUCRe2PT7A +++ cat /tmp/tmp.TXnxaV4fYW +++ rm /tmp/tmp.nUCRe2PT7A /tmp/tmp.TXnxaV4fYW +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3fxV1FackG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iTIwqorHyi ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.3fxV1FackG ++++ cat /tmp/tmp.iTIwqorHyi ++++ rm /tmp/tmp.3fxV1FackG /tmp/tmp.iTIwqorHyi ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tSfGiGkDZF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iaU0Ke3wJw +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tSfGiGkDZF +++ cat /tmp/tmp.iaU0Ke3wJw +++ rm /tmp/tmp.tSfGiGkDZF /tmp/tmp.iaU0Ke3wJw +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4OdoMNlpuj +++ mktemp ++ local LAST_ERR=/tmp/tmp.b2MA28Dyb1 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.4OdoMNlpuj ++ cat /tmp/tmp.b2MA28Dyb1 ++ rm /tmp/tmp.4OdoMNlpuj /tmp/tmp.b2MA28Dyb1 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.TtIVm18PNj ++ mktemp + local LAST_ERR=/tmp/tmp.FymGmF8gsc + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set 
-o errexit + cat /tmp/tmp.TtIVm18PNj + cat /tmp/tmp.FymGmF8gsc + rm /tmp/tmp.TtIVm18PNj /tmp/tmp.FymGmF8gsc + return 0 + sleep 10 + apply_version cluster1 9.9.9 12-recommended 12 http://version-service:11000 1 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=12-recommended + local db_version=12 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=1 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"12-recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.1Z0jVT78es ++ mktemp + local LAST_ERR=/tmp/tmp.58h18VoRDq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"12-recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1Z0jVT78es perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.58h18VoRDq + rm /tmp/tmp.1Z0jVT78es /tmp/tmp.58h18VoRDq + return 0 + sleep 10 + wait_smart_update cluster1 1 + local cluster=cluster1 + local target=1 + set +o xtrace ....................................................................... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1 + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.IjuvEBXvoU ++ mktemp + local LAST_ERR=/tmp/tmp.4yWE44O9PT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IjuvEBXvoU job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.4yWE44O9PT + rm /tmp/tmp.IjuvEBXvoU /tmp/tmp.4yWE44O9PT + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w - metadata.name fulldata-cluster1 + yq w - spec.parameters.pg-cluster cluster1 + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.name fulldata-cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 + yq w - spec.namespace version-service-31598 ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.2FfhZOrj03 + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.4qSdB7nMYw +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.60M9JeUK3Y + local exit_status=0 + local timeout=4 + set +o errexit ++ local LAST_ERR=/tmp/tmp.lj0wG5FraI ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - ++ for i in '$(seq 
0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.2FfhZOrj03 ++ cat /tmp/tmp.lj0wG5FraI ++ rm /tmp/tmp.2FfhZOrj03 /tmp/tmp.lj0wG5FraI ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-5f647d77b5-mf4tj + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4qSdB7nMYw pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.60M9JeUK3Y + rm /tmp/tmp.4qSdB7nMYw /tmp/tmp.60M9JeUK3Y + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 12-recommended 12 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=12-recommended + local db_version=12 + sort -s -u + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' ++ mktemp + local LAST_OUT=/tmp/tmp.smPDCHz5rx ++ mktemp + local LAST_ERR=/tmp/tmp.98hTwuE0PD + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.smPDCHz5rx + cat /tmp/tmp.98hTwuE0PD + rm /tmp/tmp.smPDCHz5rx /tmp/tmp.98hTwuE0PD + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-12-recommended-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/12.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'Updating PG cluster with 14 from 13' + set +o xtrace ----------------------------------------------------------------------------------- Updating PG cluster with 14 from 13 ----------------------------------------------------------------------------------- + PG_VER=13 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + 
local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - + yq w - metadata.name cluster1-users-secret ++ mktemp + local LAST_OUT=/tmp/tmp.0MQxhN8U40 ++ mktemp + local LAST_ERR=/tmp/tmp.O95SfO5Hp2 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0MQxhN8U40 secret/cluster1-users-secret created + cat /tmp/tmp.O95SfO5Hp2 + rm /tmp/tmp.0MQxhN8U40 /tmp/tmp.O95SfO5Hp2 + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.W7BIwEUzsM ++ mktemp + local LAST_ERR=/tmp/tmp.64IChRIz1w + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.W7BIwEUzsM secret/cluster1-pmm-secret configured + cat /tmp/tmp.64IChRIz1w + rm /tmp/tmp.W7BIwEUzsM /tmp/tmp.64IChRIz1w + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.tWWMnBy1hl ++ mktemp + local LAST_ERR=/tmp/tmp.F9jV6dqZgX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.tWWMnBy1hl deployment.apps/pg-client unchanged + cat /tmp/tmp.F9jV6dqZgX + rm /tmp/tmp.tWWMnBy1hl /tmp/tmp.F9jV6dqZgX + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w - metadata.labels.name cluster1 + yq w - spec.name cluster1 + yq w - spec.keepData false + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + yq w - metadata.name cluster1 + yq w - 
spec.keepBackups false + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + yq w - metadata.labels.pg-cluster cluster1 + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + yq w - spec.secretsName cluster1-users-secret + yq w - spec.user some-name + yq w - spec.database cluster1 + yq w - spec.clustername cluster1 + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.ZVtm59P9dm ++ mktemp + local LAST_ERR=/tmp/tmp.zpVzK4ltu7 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZVtm59P9dm perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.zpVzK4ltu7 + rm /tmp/tmp.ZVtm59P9dm /tmp/tmp.zpVzK4ltu7 + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1........ + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
[ polling output condensed: the same probe repeats once per second - KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}', then sleep 1 and echo -n . - while retry counts up from 6 to 110 and the job is still not found ]
.+ let retry+=1 + [[ 111 -ge 120 ]] + [[ 111 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 112 -ge 120 ]] + [[ 112 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 113 -ge 120 ]] + [[ 113 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 114 -ge 120 ]] + [[ 114 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 115 -ge 120 ]] + [[ 115 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 116 -ge 120 ]] + [[ 116 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 117 -ge 120 ]] + [[ 117 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 118 -ge 120 ]] + [[ 118 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 119 -ge 120 ]] + [[ 119 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. 
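The block above is the test's job-wait helper giving up after 120 one-second probes because job/cluster1-stanza-create was never created; since the job is marked non-mandatory here, the test logs the message and moves on. Condensed to its essentials, the pattern looks roughly like the sketch below. This is a minimal re-sketch under assumptions, not the suite's actual wait_job_completion implementation: the job name, namespace, non-mandatory flag, and 120-probe budget come from the trace, while the function name wait_job_sketch and the rest of the structure are illustrative.

#!/bin/bash
# Minimal sketch (assumed, not the real e2e helper) of the polling pattern in the
# trace above: probe for the Job by name once per second, give up after a fixed
# retry budget, and only fail hard when the job is mandatory.
wait_job_sketch() {
    local job=$1 ns=$2 mandatory=${3:-true}
    local retry=0

    # Phase 1: wait for the Job object to exist at all.
    until kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' >/dev/null 2>&1; do
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [ "$retry" -ge 120 ]; then
            [ "$mandatory" = "true" ] && { echo "job/$job never appeared" >&2; return 1; }
            echo "Can not detect job. Passing by."   # same wording as the log above
            return 0
        fi
    done

    # Phase 2: wait for the Job to report success (the real helper bounds this wait as well).
    until [ "$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}' 2>/dev/null)" = "1" ]; do
        sleep 1
        echo -n .
    done
    echo
}

# Hypothetical invocation matching the trace: the stanza-create job is optional (false),
# so after 120 misses the helper just logs and continues.
# wait_job_sketch cluster1-stanza-create version-service-31598 false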
+ set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LfNTPD802N +++ mktemp ++ local LAST_ERR=/tmp/tmp.GEHS2uPjlG ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.LfNTPD802N ++ cat /tmp/tmp.GEHS2uPjlG ++ rm /tmp/tmp.LfNTPD802N /tmp/tmp.GEHS2uPjlG ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.au0DgQs2pu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qdPKci6z4i +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.au0DgQs2pu +++ cat /tmp/tmp.qdPKci6z4i +++ rm /tmp/tmp.au0DgQs2pu /tmp/tmp.qdPKci6z4i +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qa5KsuD2hd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p5RyGtKqFt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qa5KsuD2hd +++ cat 
/tmp/tmp.p5RyGtKqFt +++ rm /tmp/tmp.qa5KsuD2hd /tmp/tmp.p5RyGtKqFt +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ux76xJR6Jw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.E2ExgCnZ18 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ux76xJR6Jw +++ cat /tmp/tmp.E2ExgCnZ18 +++ rm /tmp/tmp.ux76xJR6Jw /tmp/tmp.E2ExgCnZ18 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TFRtpoVzeB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4lqP5AsnqB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TFRtpoVzeB +++ cat /tmp/tmp.4lqP5AsnqB +++ rm /tmp/tmp.TFRtpoVzeB /tmp/tmp.4lqP5AsnqB +++ return 0 ++ [[ xcluster1-users-secret == \x ]] ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VUWc4K8iNP +++ base64 -d +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Tkb6P4KQFc ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.VUWc4K8iNP ++++ cat /tmp/tmp.Tkb6P4KQFc ++++ rm /tmp/tmp.VUWc4K8iNP /tmp/tmp.Tkb6P4KQFc ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gp4iMKEn43 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VDAG9xqopf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Gp4iMKEn43 +++ cat /tmp/tmp.VDAG9xqopf +++ rm /tmp/tmp.Gp4iMKEn43 /tmp/tmp.VDAG9xqopf +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i7DJQCeRfP +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzAH6PISyu ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.i7DJQCeRfP ++ cat /tmp/tmp.kzAH6PISyu ++ rm /tmp/tmp.i7DJQCeRfP /tmp/tmp.kzAH6PISyu ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.auZdMR3UZ9 ++ mktemp + local LAST_ERR=/tmp/tmp.51x63kIP4x + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.auZdMR3UZ9 + cat /tmp/tmp.51x63kIP4x + rm /tmp/tmp.auZdMR3UZ9 /tmp/tmp.51x63kIP4x + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nXuHbrj6UB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TMSovSCpNI +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.nXuHbrj6UB +++ cat /tmp/tmp.TMSovSCpNI +++ rm /tmp/tmp.nXuHbrj6UB /tmp/tmp.TMSovSCpNI +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RyRTOM2CN2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6Z3XglOMGu +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.RyRTOM2CN2 +++ cat /tmp/tmp.6Z3XglOMGu +++ rm /tmp/tmp.RyRTOM2CN2 /tmp/tmp.6Z3XglOMGu +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5WrZGQL0k8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.B1Gq7dEdWt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.5WrZGQL0k8 +++ cat 
/tmp/tmp.B1Gq7dEdWt +++ rm /tmp/tmp.5WrZGQL0k8 /tmp/tmp.B1Gq7dEdWt +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zYAXYDDJx8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.cM51BJdDOD ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.zYAXYDDJx8 ++++ cat /tmp/tmp.cM51BJdDOD ++++ rm /tmp/tmp.zYAXYDDJx8 /tmp/tmp.cM51BJdDOD ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eqESRQLbWL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dj0JggY7G0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.eqESRQLbWL +++ cat /tmp/tmp.Dj0JggY7G0 +++ rm /tmp/tmp.eqESRQLbWL /tmp/tmp.Dj0JggY7G0 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7JfotYj1Rq +++ mktemp ++ local LAST_ERR=/tmp/tmp.uqQkktVLV1 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.7JfotYj1Rq ++ cat /tmp/tmp.uqQkktVLV1 ++ rm /tmp/tmp.7JfotYj1Rq /tmp/tmp.uqQkktVLV1 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.mTuou2mecR ++ mktemp + local LAST_ERR=/tmp/tmp.AwtgDf87rL + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.mTuou2mecR + cat /tmp/tmp.AwtgDf87rL + rm /tmp/tmp.mTuou2mecR /tmp/tmp.AwtgDf87rL + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local 
namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WdgWovkJEu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MVio44zgKT +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.WdgWovkJEu +++ cat /tmp/tmp.MVio44zgKT +++ rm /tmp/tmp.WdgWovkJEu /tmp/tmp.MVio44zgKT +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t3e0DOrHsa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.00NJXC8s0t +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.t3e0DOrHsa +++ cat /tmp/tmp.00NJXC8s0t +++ rm /tmp/tmp.t3e0DOrHsa /tmp/tmp.00NJXC8s0t +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HMZicRFxz1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.L90bvxM3V3 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.HMZicRFxz1 +++ cat /tmp/tmp.L90bvxM3V3 +++ rm /tmp/tmp.HMZicRFxz1 /tmp/tmp.L90bvxM3V3 +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.i9PLaVfYdF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.xcIcmZv8v2 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.i9PLaVfYdF ++++ cat /tmp/tmp.xcIcmZv8v2 ++++ rm /tmp/tmp.i9PLaVfYdF /tmp/tmp.xcIcmZv8v2 ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n7SXFrMUgN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LuIJpr9dRt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.n7SXFrMUgN +++ cat /tmp/tmp.LuIJpr9dRt +++ rm /tmp/tmp.n7SXFrMUgN /tmp/tmp.LuIJpr9dRt +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE 
ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NYnIaVz5xw +++ mktemp ++ local LAST_ERR=/tmp/tmp.NdFzxEFFup ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.NYnIaVz5xw ++ cat /tmp/tmp.NdFzxEFFup ++ rm /tmp/tmp.NYnIaVz5xw /tmp/tmp.NdFzxEFFup ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.1OI8abDeaO ++ mktemp + local LAST_ERR=/tmp/tmp.MO9P4DwrdU + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1OI8abDeaO + cat /tmp/tmp.MO9P4DwrdU + rm /tmp/tmp.1OI8abDeaO /tmp/tmp.MO9P4DwrdU + return 0 + sleep 10 + apply_version cluster1 9.9.9 14-recommended 1.5.1-13 http://version-service:11000 1 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=14-recommended + local db_version=1.5.1-13 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=1 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"14-recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.WLc3yi92cG ++ mktemp + local LAST_ERR=/tmp/tmp.6o2JKRbxGb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"14-recommended"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WLc3yi92cG perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.6o2JKRbxGb + rm /tmp/tmp.WLc3yi92cG /tmp/tmp.6o2JKRbxGb + return 0 + sleep 10 + 
wait_smart_update cluster1 1 + local cluster=cluster1 + local target=1 + set +o xtrace + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1.......... + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.bHt4xey670 ++ mktemp + local LAST_ERR=/tmp/tmp.g5moYpB3NT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.bHt4xey670 job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.g5moYpB3NT + rm /tmp/tmp.bHt4xey670 /tmp/tmp.g5moYpB3NT + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - metadata.name fulldata-cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.namespace version-service-31598 + kubectl_bin apply -f - ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ mktemp ++ local LAST_OUT=/tmp/tmp.abtZDFXSfl + local LAST_OUT=/tmp/tmp.oSq3RQ0xtC +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ndWqn11T7I + local exit_status=0 + local timeout=4 + set +o errexit ++ local LAST_ERR=/tmp/tmp.RMFFGa0ggh ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.abtZDFXSfl ++ cat /tmp/tmp.RMFFGa0ggh ++ rm /tmp/tmp.abtZDFXSfl /tmp/tmp.RMFFGa0ggh ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-65857c588c-9h2zp + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oSq3RQ0xtC pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.ndWqn11T7I + rm /tmp/tmp.oSq3RQ0xtC /tmp/tmp.ndWqn11T7I + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 14-recommended 1.5.1-13 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=14-recommended + local db_version=1.5.1-13 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.J4kKeZOZ2G ++ mktemp + local LAST_ERR=/tmp/tmp.nbeKg6s24C + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.J4kKeZOZ2G + cat /tmp/tmp.nbeKg6s24C + rm /tmp/tmp.J4kKeZOZ2G /tmp/tmp.nbeKg6s24C + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-14-recommended-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/1.5.1-13.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps 
"cluster1-backrest-shared-repo" not found + desc 'PG cluster update with the latest image by version service' + set +o xtrace ----------------------------------------------------------------------------------- PG cluster update with the latest image by version service ----------------------------------------------------------------------------------- + PG_VER=12 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq w - metadata.name cluster1-users-secret + kubectl_bin -n version-service-31598 apply -f - + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.1Ssa8G6D8K + yq w - stringData.some-name some-name_pass ++ mktemp + local LAST_ERR=/tmp/tmp.6Usp8Vzhqn + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1Ssa8G6D8K secret/cluster1-users-secret created + cat /tmp/tmp.6Usp8Vzhqn + rm /tmp/tmp.1Ssa8G6D8K /tmp/tmp.6Usp8Vzhqn + return 0 + yq w - metadata.name cluster1-pmm-secret + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.61aR2VEaVp ++ mktemp + local LAST_ERR=/tmp/tmp.xDbVoaeSZH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.61aR2VEaVp secret/cluster1-pmm-secret configured + cat /tmp/tmp.xDbVoaeSZH + rm /tmp/tmp.61aR2VEaVp /tmp/tmp.xDbVoaeSZH + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SmyPLWi93x ++ mktemp + local LAST_ERR=/tmp/tmp.ktyiMVx6M3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + 
exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SmyPLWi93x deployment.apps/pg-client unchanged + cat /tmp/tmp.ktyiMVx6M3 + rm /tmp/tmp.SmyPLWi93x /tmp/tmp.ktyiMVx6M3 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w - metadata.name cluster1 + yq w - metadata.labels.name cluster1 + yq w - metadata.labels.pg-cluster cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - spec.name cluster1 + yq w - spec.keepData false + yq w - spec.keepBackups false + yq w - spec.user some-name + yq w - spec.clustername cluster1 + yq w - spec.database cluster1 + yq w - spec.secretsName cluster1-users-secret + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.FCfP4RRMZQ ++ mktemp + local LAST_ERR=/tmp/tmp.NETPf8gtaH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.FCfP4RRMZQ perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.NETPf8gtaH + rm /tmp/tmp.FCfP4RRMZQ /tmp/tmp.NETPf8gtaH + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1...... 
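[editor's note] The stanza-create and backup jobs that follow are gated by the same polling helper that recurs throughout this trace. A minimal sketch of that pattern, reconstructed from the trace rather than copied from the e2e suite (the suite's real wait_job_completion also wraps kubectl in kubectl_bin, tolerates a "not found" response for non-mandatory jobs such as cluster1-stanza-create, and uses a shorter 120-attempt budget in that case):

wait_job_completion() {
    local job=$1
    local ns=${2:-version-service-31598}   # namespace used in this run
    local retry=0
    # Poll .status.succeeded once per second until the Job reports success
    # or the retry budget is exhausted (the mandatory-job loops in this
    # trace allow up to 600 attempts).
    until [[ "$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}')" == "1" ]]; do
        sleep 1
        echo -n .
        let retry+=1
        if [ "$retry" -ge 600 ]; then
            echo "job/$job did not succeed in time" >&2
            return 1
        fi
    done
    echo
}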
+ [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' cluster1-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s9hCaz9K6P +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lp3wKRQYMp ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.s9hCaz9K6P ++ cat /tmp/tmp.Lp3wKRQYMp ++ rm /tmp/tmp.s9hCaz9K6P /tmp/tmp.Lp3wKRQYMp ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GzC1c1fE1Z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VHJup766ln +++ local exit_status=0 +++ local timeout=4 +++ set +o 
errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GzC1c1fE1Z +++ cat /tmp/tmp.VHJup766ln +++ rm /tmp/tmp.GzC1c1fE1Z /tmp/tmp.VHJup766ln +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b0XxIpThPf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uFb6OaQpnt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.b0XxIpThPf +++ cat /tmp/tmp.uFb6OaQpnt +++ rm /tmp/tmp.b0XxIpThPf /tmp/tmp.uFb6OaQpnt +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FHDyBvbO5g ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dga9ffAEfB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.FHDyBvbO5g +++ cat /tmp/tmp.Dga9ffAEfB +++ rm /tmp/tmp.FHDyBvbO5g /tmp/tmp.Dga9ffAEfB +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CEKt3nA92x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zFHx6mXxib +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CEKt3nA92x +++ cat /tmp/tmp.zFHx6mXxib +++ rm /tmp/tmp.CEKt3nA92x /tmp/tmp.zFHx6mXxib +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.01DUtPxK2U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bqqpATMhKN +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.01DUtPxK2U +++ cat /tmp/tmp.bqqpATMhKN +++ rm /tmp/tmp.01DUtPxK2U /tmp/tmp.bqqpATMhKN +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lx9X0zr5LK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5MAHpq0lgx ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.lx9X0zr5LK ++++ cat /tmp/tmp.5MAHpq0lgx ++++ rm /tmp/tmp.lx9X0zr5LK /tmp/tmp.5MAHpq0lgx ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EyBhuRUIRB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.n9Fi0CXe9g +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.EyBhuRUIRB +++ cat /tmp/tmp.n9Fi0CXe9g +++ rm /tmp/tmp.EyBhuRUIRB /tmp/tmp.n9Fi0CXe9g +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j8jSJ3ts2F +++ mktemp ++ local LAST_ERR=/tmp/tmp.jmFTMHU3vn ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.j8jSJ3ts2F ++ cat /tmp/tmp.jmFTMHU3vn ++ rm /tmp/tmp.j8jSJ3ts2F /tmp/tmp.jmFTMHU3vn ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.3tjbreiclm ++ mktemp + local LAST_ERR=/tmp/tmp.uT6AwEUxi7 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int 
PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.3tjbreiclm + cat /tmp/tmp.uT6AwEUxi7 + rm /tmp/tmp.3tjbreiclm /tmp/tmp.uT6AwEUxi7 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VoLm6neRFz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aL1hFdi6bK +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.VoLm6neRFz +++ cat /tmp/tmp.aL1hFdi6bK +++ rm /tmp/tmp.VoLm6neRFz /tmp/tmp.aL1hFdi6bK +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.13nZclgaaO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qhMW5JW49c +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.13nZclgaaO +++ cat /tmp/tmp.qhMW5JW49c +++ rm /tmp/tmp.13nZclgaaO /tmp/tmp.qhMW5JW49c +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5sOYWIVQU2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DVk8ecSJoV +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.5sOYWIVQU2 +++ cat /tmp/tmp.DVk8ecSJoV +++ rm /tmp/tmp.5sOYWIVQU2 /tmp/tmp.DVk8ecSJoV +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.cQYtXouM49 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hJ0R51Xb2G ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.cQYtXouM49 ++++ cat /tmp/tmp.hJ0R51Xb2G ++++ rm /tmp/tmp.cQYtXouM49 /tmp/tmp.hJ0R51Xb2G ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bEmEy6q3Co ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ku3ijVQ2mr +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.bEmEy6q3Co +++ cat /tmp/tmp.Ku3ijVQ2mr +++ rm /tmp/tmp.bEmEy6q3Co /tmp/tmp.Ku3ijVQ2mr +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AtwKRDGmJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.schXraXN8W ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.AtwKRDGmJz ++ cat /tmp/tmp.schXraXN8W ++ rm /tmp/tmp.AtwKRDGmJz /tmp/tmp.schXraXN8W ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.BymCwFf0mM ++ mktemp + local LAST_ERR=/tmp/tmp.fIu48yyA3Z + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BymCwFf0mM + cat /tmp/tmp.fIu48yyA3Z + rm /tmp/tmp.BymCwFf0mM /tmp/tmp.fIu48yyA3Z + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5N3kx3DTbi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lpRpixK3ie +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.5N3kx3DTbi +++ cat /tmp/tmp.lpRpixK3ie +++ rm /tmp/tmp.5N3kx3DTbi /tmp/tmp.lpRpixK3ie +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mpEsB77BCM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6ledqXIpwQ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.mpEsB77BCM +++ cat /tmp/tmp.6ledqXIpwQ +++ rm /tmp/tmp.mpEsB77BCM /tmp/tmp.6ledqXIpwQ +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S4TqrjxnnN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W24jetPxue +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.S4TqrjxnnN +++ cat /tmp/tmp.W24jetPxue +++ rm /tmp/tmp.S4TqrjxnnN /tmp/tmp.W24jetPxue +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1391bC0QqY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6pbKIZiwCg ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.1391bC0QqY ++++ cat /tmp/tmp.6pbKIZiwCg ++++ rm /tmp/tmp.1391bC0QqY /tmp/tmp.6pbKIZiwCg ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QWqozJ8Gh9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JYaDBvLOsA +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QWqozJ8Gh9 +++ cat /tmp/tmp.JYaDBvLOsA +++ rm /tmp/tmp.QWqozJ8Gh9 /tmp/tmp.JYaDBvLOsA +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qkF5JhS0hs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RTOPMrlL75 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.qkF5JhS0hs ++ cat /tmp/tmp.RTOPMrlL75 ++ rm /tmp/tmp.qkF5JhS0hs /tmp/tmp.RTOPMrlL75 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n 
version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.o7MM2ESV5z ++ mktemp + local LAST_ERR=/tmp/tmp.an5Tg1HtgR + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.o7MM2ESV5z + cat /tmp/tmp.an5Tg1HtgR + rm /tmp/tmp.o7MM2ESV5z /tmp/tmp.an5Tg1HtgR + return 0 + sleep 10 + apply_version cluster1 9.9.9 latest 12 http://version-service:11000 2 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=latest + local db_version=12 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=2 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.MxRHT1hkY2 ++ mktemp + local LAST_ERR=/tmp/tmp.5owxm8NWha + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.MxRHT1hkY2 perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.5owxm8NWha + rm /tmp/tmp.MxRHT1hkY2 /tmp/tmp.5owxm8NWha + return 0 + sleep 10 + wait_smart_update cluster1 2 + local cluster=cluster1 + local target=2 + set +o xtrace ....................................................... 
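[editor's note] For readability, the JSON patch that apply_version submitted in this pass (copied out of the single-line trace above and re-indented) is shown below; it points the cluster at the in-cluster version service, selects the "latest" release channel, and rewrites the pgo-version labels to the placeholder 9.9.9 so that the operator sees a version mismatch and schedules a smart update:

kubectl patch perconapgcluster/cluster1 --type json -p='[
  {"op": "add", "path": "/spec/upgradeOptions",
   "value": {"versionServiceEndpoint": "http://version-service:11000", "apply": "latest"}},
  {"op": "replace", "path": "/metadata/labels/pgo-version", "value": "9.9.9"},
  {"op": "replace", "path": "/spec/userLabels/pgo-version", "value": "9.9.9"}
]'

wait_smart_update cluster1 2 then blocks until the expected number of completed updates is observed (two at this point in the test, presumably counted from "update finished" entries in the operator log, judging by the update_finished_log_entries parameter) before the backup jobs are cleaned up and a fresh full backup is taken.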
+ wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1 + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.qAOT7ORQ9Z ++ mktemp + local LAST_ERR=/tmp/tmp.cqZyoFLoVF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qAOT7ORQ9Z job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.cqZyoFLoVF + rm /tmp/tmp.qAOT7ORQ9Z /tmp/tmp.cqZyoFLoVF + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 + yq w --style=single -- - spec.parameters.backrest-opts --type=full + kubectl_bin apply -f - + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.namespace version-service-31598 ++ mktemp + yq w - metadata.name fulldata-cluster1 + local LAST_OUT=/tmp/tmp.0zTYEuZoyK ++ mktemp + local LAST_ERR=/tmp/tmp.On1IOUefV0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXD7r87CP3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3GiPE54Kms ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.XXD7r87CP3 ++ cat /tmp/tmp.3GiPE54Kms ++ rm /tmp/tmp.XXD7r87CP3 /tmp/tmp.3GiPE54Kms ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-5f647d77b5-n6ml9 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0zTYEuZoyK pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.On1IOUefV0 + rm /tmp/tmp.0zTYEuZoyK /tmp/tmp.On1IOUefV0 + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 latest 12 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=latest + local db_version=12 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.SHVnmF2EQv ++ mktemp + local LAST_ERR=/tmp/tmp.3xzMXMy4FL + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SHVnmF2EQv + cat /tmp/tmp.3xzMXMy4FL + rm /tmp/tmp.SHVnmF2EQv /tmp/tmp.3xzMXMy4FL + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-latest-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/12.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 
'PG cluster with major version latest release channel' + set +o xtrace ----------------------------------------------------------------------------------- PG cluster with major version latest release channel ----------------------------------------------------------------------------------- + PG_VER=12 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Jh5FtSZyyc ++ mktemp + local LAST_ERR=/tmp/tmp.6GJkRnmiaG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Jh5FtSZyyc secret/cluster1-users-secret created + cat /tmp/tmp.6GJkRnmiaG + rm /tmp/tmp.Jh5FtSZyyc /tmp/tmp.6GJkRnmiaG + return 0 + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.UlRfdEJJC3 ++ mktemp + local LAST_ERR=/tmp/tmp.Uix29zhyb3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.UlRfdEJJC3 secret/cluster1-pmm-secret configured + cat /tmp/tmp.Uix29zhyb3 + rm /tmp/tmp.UlRfdEJJC3 /tmp/tmp.Uix29zhyb3 + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SbzuqDff1x ++ mktemp + local LAST_ERR=/tmp/tmp.wQX9Rh3syC + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat 
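create_user_secrets renders the two documents of deploy/users-secret.yaml (application users and PMM credentials) with the yq v3 read/write syntax seen in the trace and applies each one per cluster. A condensed sketch, assuming yq v3 on PATH and paths relative to the operator checkout:

#!/bin/bash
# Render per-cluster Secrets from the two-document users-secret template.
cluster=cluster1
ns=version-service-31598
tpl=deploy/users-secret.yaml

# document 0: application user credentials
yq r -d0 "$tpl" \
    | yq w - metadata.name "${cluster}-users-secret" \
    | yq w - stringData.some-name some-name_pass \
    | kubectl -n "$ns" apply -f -

# document 1: PMM credentials
yq r -d1 "$tpl" \
    | yq w - metadata.name "${cluster}-pmm-secret" \
    | kubectl -n "$ns" apply -f -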
/tmp/tmp.SbzuqDff1x deployment.apps/pg-client unchanged + cat /tmp/tmp.wQX9Rh3syC + rm /tmp/tmp.SbzuqDff1x /tmp/tmp.wQX9Rh3syC + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.name cluster1 + yq w - spec.clustername cluster1 + yq w - spec.database cluster1 + yq w - spec.keepBackups false + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + yq w - spec.keepData false + yq w - spec.name cluster1 + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w - metadata.name cluster1 + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.user some-name + yq w - spec.secretsName cluster1-users-secret + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + yq w - metadata.labels.pg-cluster cluster1 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.WJTKOOGVla ++ mktemp + local LAST_ERR=/tmp/tmp.KU0ZPp9QT4 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WJTKOOGVla perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.KU0ZPp9QT4 + rm /tmp/tmp.WJTKOOGVla /tmp/tmp.KU0ZPp9QT4 + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1......... + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
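apply_cluster stamps the 1.5.1 CR template with the cluster name, labels, secret name and per-component images through one long yq v3 pipeline, writes the result to a temp file, and applies it. A trimmed sketch that keeps only a representative subset of the fields written in the trace:

#!/bin/bash
# Customize the perconapgcluster CR template for one test cluster and apply it.
cr=e2e-tests/version-service/conf/1.5.1_cr.yaml
cluster=cluster1
ns=version-service-31598
pg_ha_image=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha

yq w "$cr" metadata.annotations.current-primary "$cluster" \
    | yq w - metadata.name "$cluster" \
    | yq w - metadata.labels.pg-cluster "$cluster" \
    | yq w - spec.clustername "$cluster" \
    | yq w - spec.database "$cluster" \
    | yq w - spec.user some-name \
    | yq w - spec.secretsName "${cluster}-users-secret" \
    | yq w - spec.pgPrimary.image "$pg_ha_image" \
    > /tmp/cr.yaml

kubectl apply -f /tmp/cr.yaml -n "$ns"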
.+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n .
[identical one-second probes for retries 3 through 119 elided: each iteration increments retry, checks it against the 120-attempt limit, re-runs the same kubectl get for job/cluster1-stanza-create, sleeps 1s, and prints another dot]
.+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by.
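Because cluster1-stanza-create is passed with mandatory=false, hitting the 120-retry ceiling only prints "Can not detect job. Passing by." and returns success instead of failing the test. A compact sketch of that branch (helper name is illustrative):

#!/bin/bash
# Wait for a Job to show up; tolerate its absence when it is optional.
wait_for_job_name() {
    local job="$1" ns="$2" mandatory="${3:-true}" retry=0

    until [[ -n "$(kubectl -n "$ns" get "job/$job" \
                   -o 'jsonpath={.metadata.name}' 2>/dev/null)" ]]; do
        retry=$((retry + 1))
        if [[ $retry -ge 120 ]]; then
            if [[ "$mandatory" == "true" ]]; then
                echo "mandatory job/$job never appeared" >&2
                return 1
            fi
            echo "Can not detect job. Passing by."
            return 0
        fi
        sleep 1
        echo -n .
    done
    echo
}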
+ set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pOyPN7CawK +++ mktemp ++ local LAST_ERR=/tmp/tmp.IFtBWYZXPL ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.pOyPN7CawK ++ cat /tmp/tmp.IFtBWYZXPL ++ rm /tmp/tmp.pOyPN7CawK /tmp/tmp.IFtBWYZXPL ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JW1A45Q1gt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jnXXGMB5iz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.JW1A45Q1gt +++ cat /tmp/tmp.jnXXGMB5iz +++ rm /tmp/tmp.JW1A45Q1gt /tmp/tmp.jnXXGMB5iz +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ps4Q1mqcOy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BXkBHSGjmQ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ps4Q1mqcOy +++ cat 
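wait_cluster_status first reads the pgo-version label to pick which status field to query, then loops on that query until it matches the target; for the 1.x operator seen here the field is .status.PGCluster.state and the target is "pgcluster Initialized". A minimal stand-in, with the retry budget and sleep interval chosen only for illustration:

#!/bin/bash
# Block until a perconapgcluster reports the expected state string.
wait_cluster_state() {
    local cluster="$1" ns="$2" target="${3:-pgcluster Initialized}" retry=0

    while [[ "$(kubectl -n "$ns" get "perconapgcluster/$cluster" \
                -o 'jsonpath={.status.PGCluster.state}')" != "$target" ]]; do
        retry=$((retry + 1))
        [[ $retry -ge 60 ]] && { echo "$cluster never reached '$target'" >&2; return 1; }
        sleep 10
    done
}

# hypothetical usage: wait_cluster_state cluster1 version-service-31598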
/tmp/tmp.BXkBHSGjmQ +++ rm /tmp/tmp.ps4Q1mqcOy /tmp/tmp.BXkBHSGjmQ +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0xgBns7ofK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c2ou1HMEnM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0xgBns7ofK +++ cat /tmp/tmp.c2ou1HMEnM +++ rm /tmp/tmp.0xgBns7ofK /tmp/tmp.c2ou1HMEnM +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CSr02B9RsS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZY5UPZWWJ7 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CSr02B9RsS +++ cat /tmp/tmp.ZY5UPZWWJ7 +++ rm /tmp/tmp.CSr02B9RsS /tmp/tmp.ZY5UPZWWJ7 +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OD9RBTecrO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pncGn6gs4N ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.OD9RBTecrO ++++ cat /tmp/tmp.pncGn6gs4N ++++ rm /tmp/tmp.OD9RBTecrO /tmp/tmp.pncGn6gs4N ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kcvuHvOPKk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.snPMolkYxt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kcvuHvOPKk +++ cat /tmp/tmp.snPMolkYxt +++ rm /tmp/tmp.kcvuHvOPKk /tmp/tmp.snPMolkYxt +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6CawfROWTP +++ mktemp ++ local LAST_ERR=/tmp/tmp.UqPYxNsmua ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6CawfROWTP ++ cat /tmp/tmp.UqPYxNsmua ++ rm /tmp/tmp.6CawfROWTP /tmp/tmp.UqPYxNsmua ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.tloUlaAlvX ++ mktemp + local LAST_ERR=/tmp/tmp.ak6Akkj8I3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.tloUlaAlvX + cat /tmp/tmp.ak6Akkj8I3 + rm /tmp/tmp.tloUlaAlvX /tmp/tmp.ak6Akkj8I3 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IQMpx9D73F ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VlcHqLX4aE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IQMpx9D73F +++ cat /tmp/tmp.VlcHqLX4aE +++ rm /tmp/tmp.IQMpx9D73F /tmp/tmp.VlcHqLX4aE +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.suCZgSYOwN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GfnFtVnX7I +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.suCZgSYOwN +++ cat /tmp/tmp.GfnFtVnX7I +++ rm /tmp/tmp.suCZgSYOwN /tmp/tmp.GfnFtVnX7I +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.k4gHpOtO2F ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9U00vxrjCx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.k4gHpOtO2F +++ cat 
/tmp/tmp.9U00vxrjCx +++ rm /tmp/tmp.k4gHpOtO2F /tmp/tmp.9U00vxrjCx +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1Etsc28gFT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9ERTj4a7qF ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.1Etsc28gFT ++++ cat /tmp/tmp.9ERTj4a7qF ++++ rm /tmp/tmp.1Etsc28gFT /tmp/tmp.9ERTj4a7qF ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wS2d7UEvVY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QnNGdWsR80 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wS2d7UEvVY +++ cat /tmp/tmp.QnNGdWsR80 +++ rm /tmp/tmp.wS2d7UEvVY /tmp/tmp.QnNGdWsR80 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YtnM2Vc5A2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PN8sp9zCCY ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.YtnM2Vc5A2 ++ cat /tmp/tmp.PN8sp9zCCY ++ rm /tmp/tmp.YtnM2Vc5A2 /tmp/tmp.PN8sp9zCCY ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.1KsAebSE5C ++ mktemp + local LAST_ERR=/tmp/tmp.hObH46lvL8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1KsAebSE5C + cat /tmp/tmp.hObH46lvL8 + rm /tmp/tmp.1KsAebSE5C /tmp/tmp.hObH46lvL8 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local 
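run_psql resolves the postgres password from the cluster's users Secret, finds the pg-client pod by label, and runs the SQL through psql inside that pod over a postgres:// URI. A simplified sketch of the same flow; unlike the helper, which wraps a printf in bash -c, this variant feeds the statement over stdin with kubectl exec -i and targets the myapp database directly in the URI:

#!/bin/bash
# Run a SQL snippet against the cluster through the pg-client helper pod.
cluster=cluster1
ns=version-service-31598
sql='INSERT INTO myApp (id) VALUES (100500);'

pass=$(kubectl -n "$ns" get "secret/${cluster}-users-secret" \
        -o 'jsonpath={.data.postgres}' | base64 -d)
client=$(kubectl -n "$ns" get pods --selector=name=pg-client \
        -o 'jsonpath={.items[].metadata.name}')

printf '%s\n' "$sql" | kubectl exec -i "$client" -n "$ns" -- \
    psql -v ON_ERROR_STOP=1 -t -q \
    "postgres://postgres:${pass}@${cluster}.${ns}.svc.cluster.local/myapp"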
namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0CWSJMj5ln ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mvWNUbRoLf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0CWSJMj5ln +++ cat /tmp/tmp.mvWNUbRoLf +++ rm /tmp/tmp.0CWSJMj5ln /tmp/tmp.mvWNUbRoLf +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CK4JF46olj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NxcpJRmLu5 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CK4JF46olj +++ cat /tmp/tmp.NxcpJRmLu5 +++ rm /tmp/tmp.CK4JF46olj /tmp/tmp.NxcpJRmLu5 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lSXxGKEoTb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.60wUqACFVa +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.lSXxGKEoTb +++ cat /tmp/tmp.60wUqACFVa +++ rm /tmp/tmp.lSXxGKEoTb /tmp/tmp.60wUqACFVa +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lagJxRfRNJ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NdvAOxyZWV ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.lagJxRfRNJ ++++ cat /tmp/tmp.NdvAOxyZWV ++++ rm /tmp/tmp.lagJxRfRNJ /tmp/tmp.NdvAOxyZWV ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.faNrFTGzjq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GaRNkEfkAj +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.faNrFTGzjq +++ cat /tmp/tmp.GaRNkEfkAj +++ rm /tmp/tmp.faNrFTGzjq /tmp/tmp.GaRNkEfkAj +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE 
ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fce7354uFL +++ mktemp ++ local LAST_ERR=/tmp/tmp.bLoQAzQurW ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.fce7354uFL ++ cat /tmp/tmp.bLoQAzQurW ++ rm /tmp/tmp.fce7354uFL /tmp/tmp.bLoQAzQurW ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.hQ3h1Pr8fu ++ mktemp + local LAST_ERR=/tmp/tmp.CYHu5e21jx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hQ3h1Pr8fu + cat /tmp/tmp.CYHu5e21jx + rm /tmp/tmp.hQ3h1Pr8fu /tmp/tmp.CYHu5e21jx + return 0 + sleep 10 + apply_version cluster1 9.9.9 12-latest 12 http://version-service:11000 3 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=12-latest + local db_version=12 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=3 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"12-latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.upnJr0kroK ++ mktemp + local LAST_ERR=/tmp/tmp.FPKZlDsxfG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"12-latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.upnJr0kroK perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.FPKZlDsxfG + rm /tmp/tmp.upnJr0kroK /tmp/tmp.FPKZlDsxfG + return 0 + sleep 10 + wait_smart_update cluster1 3 + 
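apply_version switches the cluster to the version service by adding spec.upgradeOptions (endpoint plus release channel) and rewriting the pgo-version labels to a fake 9.9.9 so the operator sees an upgrade to perform. A sketch of that JSON patch with the values taken from the trace:

#!/bin/bash
# Point the cluster at the version service and mark it as an older pgo-version
# so the smart update logic kicks in.
cluster=cluster1
vs_endpoint=http://version-service:11000
channel=12-latest

kubectl patch "perconapgcluster/$cluster" --type json -p '[
  {"op":"add","path":"/spec/upgradeOptions",
   "value":{"versionServiceEndpoint":"'"$vs_endpoint"'","apply":"'"$channel"'"}},
  {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"},
  {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"}
]'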
local cluster=cluster1 + local target=3 + set +o xtrace ..................................................... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1 + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.d35fRQO8Il ++ mktemp + local LAST_ERR=/tmp/tmp.aEqCcAAtBk + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.d35fRQO8Il job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.aEqCcAAtBk + rm /tmp/tmp.d35fRQO8Il /tmp/tmp.aEqCcAAtBk + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - metadata.name fulldata-cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 + kubectl_bin apply -f - + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.namespace version-service-31598 ++ mktemp ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' +++ mktemp + local LAST_OUT=/tmp/tmp.Zq11qOn6Ek ++ mktemp + local LAST_ERR=/tmp/tmp.2kTthi9FtU + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - ++ local LAST_OUT=/tmp/tmp.PVse1pMWX6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uZ4SfhLbmp ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.PVse1pMWX6 ++ cat /tmp/tmp.uZ4SfhLbmp ++ rm /tmp/tmp.PVse1pMWX6 /tmp/tmp.uZ4SfhLbmp ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-5f647d77b5-94d7j + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Zq11qOn6Ek pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.2kTthi9FtU + rm /tmp/tmp.Zq11qOn6Ek /tmp/tmp.2kTthi9FtU + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
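create_backup builds a pgtask manifest from the backup.yml template: it names the task <prefix>-<cluster>, sets --type=full in backrest-opts, and points parameters.podname at the current backrest-shared-repo pod before applying it. A condensed sketch of that pipeline, with the template path as in the checkout and the repo pod looked up at run time:

#!/bin/bash
# Create a full pgBackRest backup by applying a pgtask built from the template.
cluster=cluster1
ns=version-service-31598
prefix=fulldata
tpl=e2e-tests/conf/backup.yml

repo_pod=$(kubectl -n "$ns" get pods \
    --selector="name=${cluster}-backrest-shared-repo,pg-cluster=${cluster}" \
    -o 'jsonpath={.items[].metadata.name}')

yq w "$tpl" metadata.labels.pg-cluster "$cluster" \
    | yq w - metadata.name "${prefix}-${cluster}" \
    | yq w - spec.name "${prefix}-${cluster}" \
    | yq w - spec.namespace "$ns" \
    | yq w - spec.parameters.job-name "${prefix}-${cluster}" \
    | yq w - spec.parameters.pg-cluster "$cluster" \
    | yq w - spec.parameters.podname "$repo_pod" \
    | yq w --style=single -- - spec.parameters.backrest-opts --type=full \
    | kubectl -n "$ns" apply -f -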
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ grep -io 'not found' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 12-latest 12 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=12-latest + local db_version=12 + sort -s -u + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' ++ mktemp + local LAST_OUT=/tmp/tmp.SxBoM2Jyue ++ mktemp + local LAST_ERR=/tmp/tmp.hhDcDeay7A + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SxBoM2Jyue + cat /tmp/tmp.hhDcDeay7A + rm /tmp/tmp.SxBoM2Jyue /tmp/tmp.hhDcDeay7A + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-12-latest-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/12.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'PG cluster update with explicitly specified image inside version service' + set +o xtrace ----------------------------------------------------------------------------------- PG cluster update with explicitly specified image inside version service ----------------------------------------------------------------------------------- + PG_VER=13 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.khiVVeEbrS ++ mktemp + local LAST_ERR=/tmp/tmp.LhJqKjQDyZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.khiVVeEbrS deployment.apps/pg-client unchanged + cat /tmp/tmp.LhJqKjQDyZ + rm /tmp/tmp.khiVVeEbrS /tmp/tmp.LhJqKjQDyZ + return 0 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false 
+ local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.o4GEvTTXmZ ++ mktemp + local LAST_ERR=/tmp/tmp.Lmm17T91Ag + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.o4GEvTTXmZ secret/cluster1-users-secret created + cat /tmp/tmp.Lmm17T91Ag + rm /tmp/tmp.o4GEvTTXmZ /tmp/tmp.Lmm17T91Ag + return 0 + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YREguLPuMC ++ mktemp + local LAST_ERR=/tmp/tmp.sTs94vK9ya + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YREguLPuMC secret/cluster1-pmm-secret configured + cat /tmp/tmp.sTs94vK9ya + rm /tmp/tmp.YREguLPuMC /tmp/tmp.sTs94vK9ya + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.opp0UHYxlK ++ mktemp + local LAST_ERR=/tmp/tmp.4HqUaaQIL0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.opp0UHYxlK deployment.apps/pg-client unchanged + cat /tmp/tmp.4HqUaaQIL0 + rm /tmp/tmp.opp0UHYxlK /tmp/tmp.4HqUaaQIL0 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - metadata.name cluster1 + yq w - spec.name cluster1 + yq w - spec.clustername cluster1 + yq w - 
spec.database cluster1 + yq w - metadata.labels.pg-cluster cluster1 + yq w - metadata.labels.name cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w - spec.user some-name + yq w - spec.secretsName cluster1-users-secret + yq w - spec.keepData false + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + yq w - spec.keepBackups false + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.7mJ7fVRxTk ++ mktemp + local LAST_ERR=/tmp/tmp.Bd1P3HGbsG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.7mJ7fVRxTk perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.Bd1P3HGbsG + rm /tmp/tmp.7mJ7fVRxTk /tmp/tmp.Bd1P3HGbsG + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo.. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1........ + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
.+ let retry+=1 + [[ 6 -ge 120 ]] + [[ 6 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n .
(the same one-second probe repeats unchanged while retry counts up from 7 to 119)
.+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. 
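For readability, here is a condensed reconstruction of the polling logic the loop above is executing: query the Job by name, treat "not found" as not-yet-created, succeed once .status.succeeded is 1, and give up after the retry cap (120 here; 600 for the mandatory fulldata job earlier). The function body is an assumption pieced together from this output, not the script's verbatim source:

# Reconstructed sketch of wait_job_completion as observed in this trace:
# poll once per second; a timeout is only fatal when the job is mandatory
# (cluster1-stanza-create is waited on with mandatory=false).
wait_job_completion_sketch() {
  local job=$1 mandatory=${2:-true} ns=${3:-version-service-31598}
  local retry=0 cap=120 name succeeded
  [ "$mandatory" = true ] && cap=600
  while true; do
    name=$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' 2>&1)
    if ! grep -qi 'not found' <<<"$name"; then
      succeeded=$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}')
      [ "$succeeded" = 1 ] && { echo; return 0; }
    fi
    retry=$((retry + 1))
    if [ "$retry" -ge "$cap" ]; then
      [ "$mandatory" = true ] && return 1
      echo 'Can not detect job. Passing by.'
      return 0
    fi
    sleep 1
    echo -n .
  done
}

Because the stanza-create job is non-mandatory, hitting the cap is not an error here; the test simply logs "Can not detect job. Passing by." and moves on to waiting for the backup job.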
+ set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.REQtqWuJOE +++ mktemp ++ local LAST_ERR=/tmp/tmp.ouOH95Kgcd ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.REQtqWuJOE ++ cat /tmp/tmp.ouOH95Kgcd ++ rm /tmp/tmp.REQtqWuJOE /tmp/tmp.ouOH95Kgcd ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nWdGAdG45m ++++ mktemp +++ local LAST_ERR=/tmp/tmp.11tWO7MCxD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.nWdGAdG45m +++ cat /tmp/tmp.11tWO7MCxD +++ rm /tmp/tmp.nWdGAdG45m /tmp/tmp.11tWO7MCxD +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.omzIwsMPzM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.15ck1BJhXY +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.omzIwsMPzM +++ cat 
/tmp/tmp.15ck1BJhXY +++ rm /tmp/tmp.omzIwsMPzM /tmp/tmp.15ck1BJhXY +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lGO32eFGrv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1YyjvpawkW +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.lGO32eFGrv +++ cat /tmp/tmp.1YyjvpawkW +++ rm /tmp/tmp.lGO32eFGrv /tmp/tmp.1YyjvpawkW +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Jyilf1FDu6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wZ4vxzFqYB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Jyilf1FDu6 +++ cat /tmp/tmp.wZ4vxzFqYB +++ rm /tmp/tmp.Jyilf1FDu6 /tmp/tmp.wZ4vxzFqYB +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mWmTnJR8ha +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.CBlXJKNTIN ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.mWmTnJR8ha ++++ cat /tmp/tmp.CBlXJKNTIN ++++ rm /tmp/tmp.mWmTnJR8ha /tmp/tmp.CBlXJKNTIN ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L3UR53dIs3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.20d0xk8MVX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.L3UR53dIs3 +++ cat /tmp/tmp.20d0xk8MVX +++ rm /tmp/tmp.L3UR53dIs3 /tmp/tmp.20d0xk8MVX +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lbHr9pNwbx +++ mktemp ++ local LAST_ERR=/tmp/tmp.YXSO5sk1hF ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.lbHr9pNwbx ++ cat /tmp/tmp.YXSO5sk1hF ++ rm /tmp/tmp.lbHr9pNwbx /tmp/tmp.YXSO5sk1hF ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.lWrKbrtpjm ++ mktemp + local LAST_ERR=/tmp/tmp.ANYuTz2MUO + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lWrKbrtpjm + cat /tmp/tmp.ANYuTz2MUO + rm /tmp/tmp.lWrKbrtpjm /tmp/tmp.ANYuTz2MUO + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GSFdv2CiaL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qASvGub9it +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GSFdv2CiaL +++ cat /tmp/tmp.qASvGub9it +++ rm /tmp/tmp.GSFdv2CiaL /tmp/tmp.qASvGub9it +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OLGBibCfRR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.63QF8LHO1A +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OLGBibCfRR +++ cat /tmp/tmp.63QF8LHO1A +++ rm /tmp/tmp.OLGBibCfRR /tmp/tmp.63QF8LHO1A +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dIcczQREW6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iQa7gErwwa +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.dIcczQREW6 +++ cat 
/tmp/tmp.iQa7gErwwa +++ rm /tmp/tmp.dIcczQREW6 /tmp/tmp.iQa7gErwwa +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lcHvzV0mTV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9CSVGdlpAg ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.lcHvzV0mTV ++++ cat /tmp/tmp.9CSVGdlpAg ++++ rm /tmp/tmp.lcHvzV0mTV /tmp/tmp.9CSVGdlpAg ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aCG20Zja4T ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2jas8F3HFi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.aCG20Zja4T +++ cat /tmp/tmp.2jas8F3HFi +++ rm /tmp/tmp.aCG20Zja4T /tmp/tmp.2jas8F3HFi +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.53sEX2GOU2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eYNY2dREWH ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.53sEX2GOU2 ++ cat /tmp/tmp.eYNY2dREWH ++ rm /tmp/tmp.53sEX2GOU2 /tmp/tmp.eYNY2dREWH ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.nVKhsGt52u ++ mktemp + local LAST_ERR=/tmp/tmp.waXC3PYFGd + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nVKhsGt52u + cat /tmp/tmp.waXC3PYFGd + rm /tmp/tmp.nVKhsGt52u /tmp/tmp.waXC3PYFGd + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local 
namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CM2PGRvxNQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RAMKEq4ush +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CM2PGRvxNQ +++ cat /tmp/tmp.RAMKEq4ush +++ rm /tmp/tmp.CM2PGRvxNQ /tmp/tmp.RAMKEq4ush +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nON5Uz1SKS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xl3vjxiHFP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.nON5Uz1SKS +++ cat /tmp/tmp.xl3vjxiHFP +++ rm /tmp/tmp.nON5Uz1SKS /tmp/tmp.xl3vjxiHFP +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UtxOvUbdJM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JNtbovHKvW +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UtxOvUbdJM +++ cat /tmp/tmp.JNtbovHKvW +++ rm /tmp/tmp.UtxOvUbdJM /tmp/tmp.JNtbovHKvW +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.NebZeY8EHN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hkojOASRtc ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.NebZeY8EHN ++++ cat /tmp/tmp.hkojOASRtc ++++ rm /tmp/tmp.NebZeY8EHN /tmp/tmp.hkojOASRtc ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hwmb63douU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qwx6Q4g1n6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.hwmb63douU +++ cat /tmp/tmp.qwx6Q4g1n6 +++ rm /tmp/tmp.hwmb63douU /tmp/tmp.qwx6Q4g1n6 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE 
ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G62IFQJASu +++ mktemp ++ local LAST_ERR=/tmp/tmp.FDuxlr3v1T ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.G62IFQJASu ++ cat /tmp/tmp.FDuxlr3v1T ++ rm /tmp/tmp.G62IFQJASu /tmp/tmp.FDuxlr3v1T ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.BpjAYwRwxc ++ mktemp + local LAST_ERR=/tmp/tmp.H9rqej5bYR + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BpjAYwRwxc + cat /tmp/tmp.H9rqej5bYR + rm /tmp/tmp.BpjAYwRwxc /tmp/tmp.H9rqej5bYR + return 0 + sleep 10 + apply_version cluster1 9.9.9 13.12 13 http://version-service:11000 4 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=13.12 + local db_version=13 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=4 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"13.12"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.EZLM4st21T ++ mktemp + local LAST_ERR=/tmp/tmp.e94li305rG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"13.12"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.EZLM4st21T perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.e94li305rG + rm /tmp/tmp.EZLM4st21T /tmp/tmp.e94li305rG + return 0 + sleep 10 + wait_smart_update cluster1 4 + local 
cluster=cluster1 + local target=4 + set +o xtrace .......................................................... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1 + kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.f0CoWjL2Pa ++ mktemp + local LAST_ERR=/tmp/tmp.pUKJdAiKoB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.f0CoWjL2Pa job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.pUKJdAiKoB + rm /tmp/tmp.f0CoWjL2Pa /tmp/tmp.pUKJdAiKoB + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.namespace version-service-31598 + yq w - metadata.name fulldata-cluster1 + yq w --style=single -- - spec.parameters.backrest-opts --type=full + kubectl_bin apply -f - + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ mktemp + local LAST_OUT=/tmp/tmp.sInBpmqi5H +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.SBHutZBXZG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_OUT=/tmp/tmp.qjlI1XhSsL +++ mktemp + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - ++ local LAST_ERR=/tmp/tmp.BFBxQyNjFJ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.qjlI1XhSsL ++ cat /tmp/tmp.BFBxQyNjFJ ++ rm /tmp/tmp.qjlI1XhSsL /tmp/tmp.BFBxQyNjFJ ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-57f668d478-6zlbp + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.sInBpmqi5H pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.SBHutZBXZG + rm /tmp/tmp.sInBpmqi5H /tmp/tmp.SBHutZBXZG + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
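For reference, the apply_version call above reduces to a single JSON patch against the perconapgcluster resource: it points spec.upgradeOptions at the in-cluster version service, requests the 13.12 release channel, and rewrites the pgo-version labels to a fake 9.9.9 so the operator has something to upgrade from. A standalone sketch using the values seen in this run (namespace left to the current context, as in the helper):

# trigger a version-service-driven smart update
kubectl patch perconapgcluster/cluster1 --type json -p='[
  {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"13.12"}},
  {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"},
  {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"}
]'

wait_smart_update is then told to expect 4 "update finished" events before the test moves on, presumably one per component the operator rolls during the smart update.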
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 13.12 13 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=13.12 + local db_version=13 + sort -s -u + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' ++ mktemp + local LAST_OUT=/tmp/tmp.9v1UOg4pup ++ mktemp + local LAST_ERR=/tmp/tmp.PvuEclRqJe + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9v1UOg4pup + cat /tmp/tmp.PvuEclRqJe + rm /tmp/tmp.9v1UOg4pup /tmp/tmp.PvuEclRqJe + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-13.12-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/13.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'PG cluster with version service offline' + set +o xtrace ----------------------------------------------------------------------------------- PG cluster with version service offline ----------------------------------------------------------------------------------- + PG_VER=13 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq w - metadata.name cluster1-users-secret + yq w - 
stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.zyOF0LcWnS ++ mktemp + local LAST_ERR=/tmp/tmp.ULrRVbIO0a + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zyOF0LcWnS secret/cluster1-users-secret created + cat /tmp/tmp.ULrRVbIO0a + rm /tmp/tmp.zyOF0LcWnS /tmp/tmp.ULrRVbIO0a + return 0 + yq w - metadata.name cluster1-pmm-secret + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.cCSxtoDuo5 ++ mktemp + local LAST_ERR=/tmp/tmp.hnvPieJN4U + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.cCSxtoDuo5 secret/cluster1-pmm-secret configured + cat /tmp/tmp.hnvPieJN4U + rm /tmp/tmp.cCSxtoDuo5 /tmp/tmp.hnvPieJN4U + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uLjEyWPIQN ++ mktemp + local LAST_ERR=/tmp/tmp.l8TmXczHZ3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.uLjEyWPIQN deployment.apps/pg-client unchanged + cat /tmp/tmp.l8TmXczHZ3 + rm /tmp/tmp.uLjEyWPIQN /tmp/tmp.l8TmXczHZ3 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - metadata.labels.deployment-name cluster1 + yq w - metadata.labels.name cluster1 + yq w - metadata.name cluster1 + yq w - spec.keepData false + yq w - spec.keepBackups false + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest + yq w - spec.user some-name + yq w - spec.secretsName cluster1-users-secret + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha + yq w - spec.clustername cluster1 + yq w - 
spec.database cluster1 + yq w - spec.name cluster1 + yq w - metadata.labels.pg-cluster cluster1 + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.TGoAGJ3Oqa ++ mktemp + local LAST_ERR=/tmp/tmp.PVDOl3gTKM + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TGoAGJ3Oqa perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.PVDOl3gTKM + rm /tmp/tmp.TGoAGJ3Oqa /tmp/tmp.PVDOl3gTKM + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo.. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1...... + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' cluster1-stanza-create+ retry=0 ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
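The apply_cluster step earlier in this scenario is a chain of yq v3 rewrites over the 1.5.1 CR template; that is how the test pins the cluster name, the user secret and the old ppg13 images before applying the manifest. A shortened sketch covering only the fields this scenario relies on (path shown repo-relative; the helper itself writes the result to a temporary cr.yaml before applying it):

# a subset of the rewrites apply_cluster performs on the 1.5.1 CR template
yq w e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.name cluster1 \
  | yq w - metadata.labels.pg-cluster cluster1 \
  | yq w - spec.clustername cluster1 \
  | yq w - spec.user some-name \
  | yq w - spec.secretsName cluster1-users-secret \
  | yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha \
  | yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest \
  | yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer \
  | kubectl -n version-service-31598 apply -f -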
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
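The runs of dots in this part of the log are wait_job_completion: it first confirms the job object exists, then re-reads .status.succeeded once per second until it becomes 1 or the retry cap (600 here) is reached. Stripped of the temp-file plumbing, the loop is essentially:

# poll a job until Kubernetes reports it succeeded (job name taken from this run)
retry=0
until [[ "$(kubectl -n version-service-31598 get job/backrest-backup-cluster1 \
    -o 'jsonpath={.status.succeeded}')" == "1" ]]; do
  sleep 1
  echo -n .
  retry=$((retry + 1))
  [ "$retry" -ge 600 ] && { echo 'job did not finish in time' >&2; exit 1; }
done

A plain kubectl wait --for=condition=complete job/backrest-backup-cluster1 --timeout=600s would express roughly the same thing in one line, minus the helper's explicit "not found" handling for optional jobs.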
.+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.64ckj1oOzx +++ mktemp ++ local LAST_ERR=/tmp/tmp.3pwpbiLS1y ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.64ckj1oOzx ++ cat /tmp/tmp.3pwpbiLS1y ++ rm /tmp/tmp.64ckj1oOzx /tmp/tmp.3pwpbiLS1y ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yVnIm2kxv7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P3suKYwt4T +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.yVnIm2kxv7 +++ cat /tmp/tmp.P3suKYwt4T +++ rm /tmp/tmp.yVnIm2kxv7 /tmp/tmp.P3suKYwt4T +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hduC2VIe5S ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H0aV6ihbYe +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.hduC2VIe5S +++ cat /tmp/tmp.H0aV6ihbYe +++ rm /tmp/tmp.hduC2VIe5S /tmp/tmp.H0aV6ihbYe +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.alM0Kxc8vA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.np02UsXo7z +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.alM0Kxc8vA +++ cat /tmp/tmp.np02UsXo7z +++ rm /tmp/tmp.alM0Kxc8vA /tmp/tmp.np02UsXo7z +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9Osq7uvVim ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fGQKbBYZoM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9Osq7uvVim +++ cat /tmp/tmp.fGQKbBYZoM +++ rm /tmp/tmp.9Osq7uvVim /tmp/tmp.fGQKbBYZoM +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MaVfPWA70z +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yuLgQ7ik8I ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.MaVfPWA70z ++++ cat /tmp/tmp.yuLgQ7ik8I ++++ rm /tmp/tmp.MaVfPWA70z /tmp/tmp.yuLgQ7ik8I ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cakbEXg7Bz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8Mm5DbJEoc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.cakbEXg7Bz +++ cat /tmp/tmp.8Mm5DbJEoc +++ rm /tmp/tmp.cakbEXg7Bz /tmp/tmp.8Mm5DbJEoc +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CuoKJjkvtM +++ mktemp ++ local LAST_ERR=/tmp/tmp.nEmbaztCXS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.CuoKJjkvtM ++ cat /tmp/tmp.nEmbaztCXS ++ rm 
/tmp/tmp.CuoKJjkvtM /tmp/tmp.nEmbaztCXS ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.AsIVlBIBfH ++ mktemp + local LAST_ERR=/tmp/tmp.RUh9tZNOV6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.AsIVlBIBfH + cat /tmp/tmp.RUh9tZNOV6 + rm /tmp/tmp.AsIVlBIBfH /tmp/tmp.RUh9tZNOV6 + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1cr4EXyOVg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.snrnld1LD6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1cr4EXyOVg +++ cat /tmp/tmp.snrnld1LD6 +++ rm /tmp/tmp.1cr4EXyOVg /tmp/tmp.snrnld1LD6 +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IQgHoXSVQ9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BnsUH7H3c1 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IQgHoXSVQ9 +++ cat /tmp/tmp.BnsUH7H3c1 +++ rm /tmp/tmp.IQgHoXSVQ9 /tmp/tmp.BnsUH7H3c1 +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uePReaVlVJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AU6xoKZ6ym +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.uePReaVlVJ +++ cat /tmp/tmp.AU6xoKZ6ym +++ rm /tmp/tmp.uePReaVlVJ /tmp/tmp.AU6xoKZ6ym +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hysE2ifC4d +++++ mktemp ++++ local 
LAST_ERR=/tmp/tmp.9ibpEjiCNM ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.hysE2ifC4d ++++ cat /tmp/tmp.9ibpEjiCNM ++++ rm /tmp/tmp.hysE2ifC4d /tmp/tmp.9ibpEjiCNM ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ULZyL8RPrt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bsweUwpynP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ULZyL8RPrt +++ cat /tmp/tmp.bsweUwpynP +++ rm /tmp/tmp.ULZyL8RPrt /tmp/tmp.bsweUwpynP +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A3whttVkJb +++ mktemp ++ local LAST_ERR=/tmp/tmp.9HrMc0Y1BC ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.A3whttVkJb ++ cat /tmp/tmp.9HrMc0Y1BC ++ rm /tmp/tmp.A3whttVkJb /tmp/tmp.9HrMc0Y1BC ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.CZLsOVuVTd ++ mktemp + local LAST_ERR=/tmp/tmp.Hr4fIZERHb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.CZLsOVuVTd + cat /tmp/tmp.Hr4fIZERHb + rm /tmp/tmp.CZLsOVuVTd /tmp/tmp.Hr4fIZERHb + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oFAB8oXcYq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.h3O2B6WS4W +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in 
'$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.oFAB8oXcYq +++ cat /tmp/tmp.h3O2B6WS4W +++ rm /tmp/tmp.oFAB8oXcYq /tmp/tmp.h3O2B6WS4W +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YTeh2J62tR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ENOX9q8iSC +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.YTeh2J62tR +++ cat /tmp/tmp.ENOX9q8iSC +++ rm /tmp/tmp.YTeh2J62tR /tmp/tmp.ENOX9q8iSC +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8YT4OTMH4u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rrPVmMyk1y +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8YT4OTMH4u +++ cat /tmp/tmp.rrPVmMyk1y +++ rm /tmp/tmp.8YT4OTMH4u /tmp/tmp.rrPVmMyk1y +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yThMoltGq1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.wcTQzOvu7r ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.yThMoltGq1 ++++ cat /tmp/tmp.wcTQzOvu7r ++++ rm /tmp/tmp.yThMoltGq1 /tmp/tmp.wcTQzOvu7r ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pwgQzB8Wi4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.90hC4J65Tf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.pwgQzB8Wi4 +++ cat /tmp/tmp.90hC4J65Tf +++ rm /tmp/tmp.pwgQzB8Wi4 /tmp/tmp.90hC4J65Tf +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local 
suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jctqANsRmV +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y9qSlDdEVS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jctqANsRmV ++ cat /tmp/tmp.Y9qSlDdEVS ++ rm /tmp/tmp.jctqANsRmV /tmp/tmp.Y9qSlDdEVS ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.t6pcHR2RbH ++ mktemp + local LAST_ERR=/tmp/tmp.xRUugJLXz0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.t6pcHR2RbH + cat /tmp/tmp.xRUugJLXz0 + rm /tmp/tmp.t6pcHR2RbH /tmp/tmp.xRUugJLXz0 + return 0 + sleep 10 + apply_version cluster1 9.9.9 latest 1.5.1-13 http://not-a-version-service/ 4 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=latest + local db_version=1.5.1-13 + local vs_endpoint=http://not-a-version-service/ + local update_finished_log_entries=4 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://not-a-version-service/","apply":"latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.oXms9G5Tx2 ++ mktemp + local LAST_ERR=/tmp/tmp.aVsH30F5JD + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://not-a-version-service/","apply":"latest"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oXms9G5Tx2 perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.aVsH30F5JD + rm /tmp/tmp.oXms9G5Tx2 /tmp/tmp.aVsH30F5JD + return 0 + sleep 10 + wait_smart_update cluster1 4 + local cluster=cluster1 + local target=4 + set +o xtrace + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1................ 
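Every SQL statement in this test goes through the same run_psql helper: look up the pg-client pod, decode the postgres password from the users secret, and pipe the statement into psql inside that pod against the cluster's in-cluster DNS name. Reduced to its moving parts, with a stand-in SELECT 1 instead of the GRANT above:

# resolve the client pod and the postgres password
client=$(kubectl -n version-service-31598 get pods --selector=name=pg-client \
  -o 'jsonpath={.items[].metadata.name}')
pass=$(kubectl -n version-service-31598 get secret/cluster1-users-secret \
  -o 'jsonpath={.data.postgres}' | base64 -d)
# run a statement through the client pod against the cluster service
kubectl -n version-service-31598 exec "$client" -- bash -c \
  "printf 'SELECT 1\n' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:${pass}@cluster1.version-service-31598.svc.cluster.local"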
+ kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.6HrG2woEdl ++ mktemp + local LAST_ERR=/tmp/tmp.V39w4zD1Zu + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.6HrG2woEdl job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.V39w4zD1Zu + rm /tmp/tmp.6HrG2woEdl /tmp/tmp.V39w4zD1Zu + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - metadata.name fulldata-cluster1 + yq w - spec.namespace version-service-31598 + yq w - spec.parameters.pg-cluster cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.parameters.job-name fulldata-cluster1 + yq w --style=single -- - spec.parameters.backrest-opts --type=full + kubectl_bin apply -f - ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ mktemp +++ mktemp + local LAST_OUT=/tmp/tmp.U6ybJe1lTw ++ mktemp ++ local LAST_OUT=/tmp/tmp.9LmXCwouTw +++ mktemp + local LAST_ERR=/tmp/tmp.Wwg44SxVey + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.NzmNGMlxxO ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.9LmXCwouTw ++ cat /tmp/tmp.NzmNGMlxxO ++ rm /tmp/tmp.9LmXCwouTw /tmp/tmp.NzmNGMlxxO ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-65857c588c-pxr64 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.U6ybJe1lTw pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.Wwg44SxVey + rm /tmp/tmp.U6ybJe1lTw /tmp/tmp.Wwg44SxVey + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
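create_backup above does not invoke pgbackrest directly; it renders a pgtask custom resource from the suite's backup.yml template and lets the operator schedule the backup job. Collected into one pipeline (template path shown repo-relative, repo pod resolved the same way the helper does it):

# request an on-demand full backup via a pgtask
repo_pod=$(kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 \
  -o 'jsonpath={.items[].metadata.name}')
yq w e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 \
  | yq w - metadata.name fulldata-cluster1 \
  | yq w - spec.name fulldata-cluster1 \
  | yq w - spec.namespace version-service-31598 \
  | yq w - spec.parameters.job-name fulldata-cluster1 \
  | yq w - spec.parameters.pg-cluster cluster1 \
  | yq w - spec.parameters.podname "$repo_pod" \
  | yq w --style=single -- - spec.parameters.backrest-opts --type=full \
  | kubectl apply -f -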
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 latest 1.5.1-13 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=latest + local db_version=1.5.1-13 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.xzmheZoNSj ++ mktemp + local LAST_ERR=/tmp/tmp.4YWMnre2sM + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.xzmheZoNSj + cat /tmp/tmp.4YWMnre2sM + rm /tmp/tmp.xzmheZoNSj /tmp/tmp.4YWMnre2sM + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-latest-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/1.5.1-13.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'Updating PG cluster with version service available but disabled' + set +o xtrace ----------------------------------------------------------------------------------- Updating PG cluster with version service available but disabled ----------------------------------------------------------------------------------- + PG_VER=12 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local 
namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wkwfDx4OOO ++ mktemp + local LAST_ERR=/tmp/tmp.pYpgext2h3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wkwfDx4OOO secret/cluster1-users-secret created + cat /tmp/tmp.pYpgext2h3 + rm /tmp/tmp.wkwfDx4OOO /tmp/tmp.pYpgext2h3 + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jbhzpjQMpp ++ mktemp + local LAST_ERR=/tmp/tmp.gK3JJqTc1A + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.jbhzpjQMpp secret/cluster1-pmm-secret configured + cat /tmp/tmp.gK3JJqTc1A + rm /tmp/tmp.jbhzpjQMpp /tmp/tmp.gK3JJqTc1A + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BN0xhhDHqW ++ mktemp + local LAST_ERR=/tmp/tmp.6eqpb6xhPm + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BN0xhhDHqW deployment.apps/pg-client unchanged + cat /tmp/tmp.6eqpb6xhPm + rm /tmp/tmp.BN0xhhDHqW /tmp/tmp.6eqpb6xhPm + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.secretsName cluster1-users-secret + yq w - spec.user some-name + yq w - spec.keepBackups false + yq w - metadata.labels.deployment-name cluster1 + yq w - spec.keepData false + yq w - metadata.labels.pg-cluster cluster1 + yq w - spec.name cluster1 + yq w - spec.database cluster1 + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + yq w - spec.clustername cluster1 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + yq w - spec.pgBadger.image 
percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + yq w - metadata.labels.name cluster1 + yq w - metadata.name cluster1 + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.OHdlbaPodn ++ mktemp + local LAST_ERR=/tmp/tmp.CqVVYQmy8Y + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.OHdlbaPodn perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.CqVVYQmy8Y + rm /tmp/tmp.OHdlbaPodn /tmp/tmp.CqVVYQmy8Y + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo.. + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1.......... + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 6 -ge 120 ]] + [[ 6 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 7 -ge 120 ]] + [[ 7 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 8 -ge 120 ]] + [[ 8 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
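The verify_applied_version check that closed the previous scenario (the diff against 1.5.1-13.txt above) is a plain image inventory: every container image in the cluster's deployments and jobs is dumped via jsonpath, de-duplicated, and compared with a golden list shipped with the suite. A minimal sketch, with an illustrative output path:

# collect the images actually deployed for the cluster and diff them against the expected list
kubectl get deployments,jobs --selector=pg-cluster=cluster1 \
  -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' \
  | sort -s -u > /tmp/deployed-images.txt
diff -u /tmp/deployed-images.txt e2e-tests/version-service/compare/1.5.1-13.txt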
(wait_job_completion trace condensed: attempts 9 through 113 repeat the identical block; each pass re-runs KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}', finds no job, sleeps 1 second, prints a dot, increments retry, and the [[ retry -ge 120 ]] guard stays false on every pass)
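(Attempts 114 through 120 follow below. For reference, the non-mandatory wait being traced here boils down to a polling loop like the bash sketch that follows; wait_job_completion is the test-suite helper, and this standalone version with its variable names is an illustration of the pattern, not the helper's exact source.)

# Minimal sketch of the non-mandatory job wait seen above: poll for the Job name,
# give up quietly after 120 one-second attempts (names taken from this run's log).
ns=version-service-31598
job=cluster1-stanza-create
retry=0
until kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' >/dev/null 2>&1; do
    sleep 1
    echo -n .
    retry=$((retry + 1))
    if [[ $retry -ge 120 ]]; then
        echo 'Can not detect job. Passing by.'
        break
    fi
done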
.+ let retry+=1 + [[ 114 -ge 120 ]] + [[ 114 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 115 -ge 120 ]] + [[ 115 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 116 -ge 120 ]] + [[ 116 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 117 -ge 120 ]] + [[ 117 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 118 -ge 120 ]] + [[ 118 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 119 -ge 120 ]] + [[ 119 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. + set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DkFC3Z4XfD +++ mktemp ++ local LAST_ERR=/tmp/tmp.tOzrEikTTO ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.DkFC3Z4XfD ++ cat /tmp/tmp.tOzrEikTTO ++ rm /tmp/tmp.DkFC3Z4XfD /tmp/tmp.tOzrEikTTO ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 
'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pPpgcKW9Ts ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gosSbMxvpv +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.pPpgcKW9Ts +++ cat /tmp/tmp.gosSbMxvpv +++ rm /tmp/tmp.pPpgcKW9Ts /tmp/tmp.gosSbMxvpv +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OLKEaFoPwH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dqae9ciodi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OLKEaFoPwH +++ cat /tmp/tmp.dqae9ciodi +++ rm /tmp/tmp.OLKEaFoPwH /tmp/tmp.dqae9ciodi +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ekJeE2dryP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.d4YlXgUE2q +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ekJeE2dryP +++ cat /tmp/tmp.d4YlXgUE2q +++ rm /tmp/tmp.ekJeE2dryP /tmp/tmp.d4YlXgUE2q +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j9m5ZKC5dV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i69HsUNKFx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.j9m5ZKC5dV +++ cat /tmp/tmp.i69HsUNKFx +++ rm /tmp/tmp.j9m5ZKC5dV /tmp/tmp.i69HsUNKFx +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5YhTtWgGBS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NEWNkr7sbj ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get 
perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.5YhTtWgGBS ++++ cat /tmp/tmp.NEWNkr7sbj ++++ rm /tmp/tmp.5YhTtWgGBS /tmp/tmp.NEWNkr7sbj ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kJwzGRYez1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nA7xxAUHXR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kJwzGRYez1 +++ cat /tmp/tmp.nA7xxAUHXR +++ rm /tmp/tmp.kJwzGRYez1 /tmp/tmp.nA7xxAUHXR +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ti3xBeEAW5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tk3x1quGJZ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Ti3xBeEAW5 ++ cat /tmp/tmp.tk3x1quGJZ ++ rm /tmp/tmp.Ti3xBeEAW5 /tmp/tmp.tk3x1quGJZ ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.3xJkoeAJLn ++ mktemp + local LAST_ERR=/tmp/tmp.m9QvpGt4dC + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.3xJkoeAJLn + cat /tmp/tmp.m9QvpGt4dC + rm /tmp/tmp.3xJkoeAJLn /tmp/tmp.m9QvpGt4dC + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OoPKi8h9Xx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.unP8vkSPNE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl 
-n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OoPKi8h9Xx +++ cat /tmp/tmp.unP8vkSPNE +++ rm /tmp/tmp.OoPKi8h9Xx /tmp/tmp.unP8vkSPNE +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TVVxAgVIxw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0T7lVnzgsM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TVVxAgVIxw +++ cat /tmp/tmp.0T7lVnzgsM +++ rm /tmp/tmp.TVVxAgVIxw /tmp/tmp.0T7lVnzgsM +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yB2KRah7lv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o707XPUtIB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.yB2KRah7lv +++ cat /tmp/tmp.o707XPUtIB +++ rm /tmp/tmp.yB2KRah7lv /tmp/tmp.o707XPUtIB +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.BJncPlznZN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LUmE4dLJ6K ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.BJncPlznZN ++++ cat /tmp/tmp.LUmE4dLJ6K ++++ rm /tmp/tmp.BJncPlznZN /tmp/tmp.LUmE4dLJ6K ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sF5LRcJ0Sh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aYeaQQjN7q +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.sF5LRcJ0Sh +++ cat /tmp/tmp.aYeaQQjN7q +++ rm /tmp/tmp.sF5LRcJ0Sh /tmp/tmp.aYeaQQjN7q +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.obI3cSgf7p +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.jOY26IwJo8 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.obI3cSgf7p ++ cat /tmp/tmp.jOY26IwJo8 ++ rm /tmp/tmp.obI3cSgf7p /tmp/tmp.jOY26IwJo8 ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.8f9oE48lUx ++ mktemp + local LAST_ERR=/tmp/tmp.E6QcJvZJlG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.8f9oE48lUx + cat /tmp/tmp.E6QcJvZJlG + rm /tmp/tmp.8f9oE48lUx /tmp/tmp.E6QcJvZJlG + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jQ6qI0jvnD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gb1i4CwOLq +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jQ6qI0jvnD +++ cat /tmp/tmp.gb1i4CwOLq +++ rm /tmp/tmp.jQ6qI0jvnD /tmp/tmp.gb1i4CwOLq +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m3USCPjvzD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jVFXR6U08B +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.m3USCPjvzD +++ cat /tmp/tmp.jVFXR6U08B +++ rm /tmp/tmp.m3USCPjvzD /tmp/tmp.jVFXR6U08B +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.h7v76fu8g7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ov0c76k8s6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat 
/tmp/tmp.h7v76fu8g7 +++ cat /tmp/tmp.Ov0c76k8s6 +++ rm /tmp/tmp.h7v76fu8g7 /tmp/tmp.Ov0c76k8s6 +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.72nnIFFl9S +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.m73mjNd87n ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.72nnIFFl9S ++++ cat /tmp/tmp.m73mjNd87n ++++ rm /tmp/tmp.72nnIFFl9S /tmp/tmp.m73mjNd87n ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4r4zJM6LAa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O8MfqyGIEN +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.4r4zJM6LAa +++ cat /tmp/tmp.O8MfqyGIEN +++ rm /tmp/tmp.4r4zJM6LAa /tmp/tmp.O8MfqyGIEN +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9NQ3yqsUjF +++ mktemp ++ local LAST_ERR=/tmp/tmp.uaD0ZC1WcP ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.9NQ3yqsUjF ++ cat /tmp/tmp.uaD0ZC1WcP ++ rm /tmp/tmp.9NQ3yqsUjF /tmp/tmp.uaD0ZC1WcP ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.aHLS15VQ8u ++ mktemp + local LAST_ERR=/tmp/tmp.wx4STXHPcg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set 
-o errexit + cat /tmp/tmp.aHLS15VQ8u + cat /tmp/tmp.wx4STXHPcg + rm /tmp/tmp.aHLS15VQ8u /tmp/tmp.wx4STXHPcg + return 0 + sleep 10 + apply_version cluster1 9.9.9 disabled 1.5.1-12 http://version-service:11000 4 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=disabled + local db_version=1.5.1-12 + local vs_endpoint=http://version-service:11000 + local update_finished_log_entries=4 + kubectl_bin patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"disabled"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.GO9mVrY3O1 ++ mktemp + local LAST_ERR=/tmp/tmp.eFdliSrrSS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl patch perconapgcluster/cluster1 --type json '-p=[ {"op":"add","path":"/spec/upgradeOptions","value":{"versionServiceEndpoint":"http://version-service:11000","apply":"disabled"}}, {"op":"replace","path":"/metadata/labels/pgo-version","value":"9.9.9"}, {"op":"replace","path":"/spec/userLabels/pgo-version","value":"9.9.9"} ]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.GO9mVrY3O1 perconapgcluster.pg.percona.com/cluster1 patched + cat /tmp/tmp.eFdliSrrSS + rm /tmp/tmp.GO9mVrY3O1 /tmp/tmp.eFdliSrrSS + return 0 + sleep 10 + wait_smart_update cluster1 4 + local cluster=cluster1 + local target=4 + set +o xtrace + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1........... 
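(The 'write data' steps above decode the postgres password from the cluster1-users-secret Secret and pipe SQL into psql inside the pg-client pod. A minimal standalone equivalent is sketched below; it connects to the myapp database directly through the connection URI instead of using \c as the helper does, looks the client pod up by label because the -hdmb4 suffix is specific to this run, and adds the -n flag explicitly.)

# Decode the postgres password (stored base64-encoded under .data.postgres).
ns=version-service-31598
pass=$(kubectl -n "$ns" get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' | base64 -d)

# Locate the pg-client pod and run the same SQL the test runs.
client=$(kubectl -n "$ns" get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')
uri="postgres://postgres:${pass}@cluster1.${ns}.svc.cluster.local"
kubectl -n "$ns" exec "$client" -- psql -v ON_ERROR_STOP=1 -t -q "$uri" \
    -c 'CREATE DATABASE myapp;'
kubectl -n "$ns" exec "$client" -- psql -v ON_ERROR_STOP=1 -t -q "${uri}/myapp" \
    -c 'CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);'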
+ kubectl_bin delete jobs --selector=pg-cluster=cluster1 ++ mktemp + local LAST_OUT=/tmp/tmp.G5yxnqeXsS ++ mktemp + local LAST_ERR=/tmp/tmp.t3uCeT1iBF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete jobs --selector=pg-cluster=cluster1 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.G5yxnqeXsS job.batch "backrest-backup-cluster1" deleted + cat /tmp/tmp.t3uCeT1iBF + rm /tmp/tmp.G5yxnqeXsS /tmp/tmp.t3uCeT1iBF + return 0 + create_backup cluster1 fulldata full + local cluster=cluster1 + local bckp_prefix=fulldata + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 + yq w - metadata.name fulldata-cluster1 + yq w - spec.name fulldata-cluster1 + yq w - spec.namespace version-service-31598 + kubectl_bin apply -f - + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.job-name fulldata-cluster1 + yq w - spec.parameters.pg-cluster cluster1 ++ mktemp ++ kubectl_bin get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' +++ mktemp + local LAST_OUT=/tmp/tmp.WnSOwSblrv ++ mktemp + local LAST_ERR=/tmp/tmp.DiU7Eut9nP + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_OUT=/tmp/tmp.kkzeRlJ5wb + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f - +++ mktemp ++ local LAST_ERR=/tmp/tmp.CpAP1iNg3K ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.kkzeRlJ5wb ++ cat /tmp/tmp.CpAP1iNg3K ++ rm /tmp/tmp.kkzeRlJ5wb /tmp/tmp.CpAP1iNg3K ++ return 0 + yq w - spec.parameters.podname cluster1-backrest-shared-repo-85c794b5cb-cq9x6 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WnSOwSblrv pgtask.pg.percona.com/fulldata-cluster1 created + cat /tmp/tmp.DiU7Eut9nP + rm /tmp/tmp.WnSOwSblrv /tmp/tmp.DiU7Eut9nP + return 0 + sleep 10 + wait_job_completion fulldata-cluster1 + local job=fulldata-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' fulldata-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
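(Looking back at the apply_version step above: it reduces to a single kubectl JSON patch on the perconapgcluster CR that adds spec.upgradeOptions pointing at the in-cluster version service with apply=disabled and bumps both pgo-version labels to the fake 9.9.9, giving the operator's smart update something to reconcile. The same patch, reformatted for readability and with the namespace flag added for a standalone run:)

kubectl -n version-service-31598 patch perconapgcluster/cluster1 --type json -p '[
  {"op": "add", "path": "/spec/upgradeOptions",
   "value": {"versionServiceEndpoint": "http://version-service:11000", "apply": "disabled"}},
  {"op": "replace", "path": "/metadata/labels/pgo-version", "value": "9.9.9"},
  {"op": "replace", "path": "/spec/userLabels/pgo-version", "value": "9.9.9"}
]'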
(trace condensed: attempts 2 through 8 repeat the same pair of probes, job/fulldata-cluster1 name via 'jsonpath={.metadata.name}' followed by 'jsonpath={.status.succeeded}', with .status.succeeded still empty each time; every pass sleeps 1 second, prints a dot, and the '[' retry -ge 600 ']' guard stays false)
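(For context on the job being polled here: create_backup above builds the fulldata-cluster1 pgtask by rewriting e2e-tests/conf/backup.yml with yq and applying the result. The pipeline below restates that templating with this run's values; it assumes yq v3 ('yq w') as used throughout the trace, paths relative to the repo checkout, and it looks the backrest repo pod up by label instead of hard-coding the -85c794b5cb-cq9x6 name.)

# Pod that hosts the pgBackRest shared repo for cluster1.
repo_pod=$(kubectl -n version-service-31598 get pods \
    --selector=name=cluster1-backrest-shared-repo,pg-cluster=cluster1 \
    -o 'jsonpath={.items[].metadata.name}')

# Template the backup pgtask (yq v3 write syntax) and create it.
yq w e2e-tests/conf/backup.yml metadata.labels.pg-cluster cluster1 \
  | yq w - metadata.name fulldata-cluster1 \
  | yq w - spec.name fulldata-cluster1 \
  | yq w - spec.namespace version-service-31598 \
  | yq w - spec.parameters.job-name fulldata-cluster1 \
  | yq w - spec.parameters.pg-cluster cluster1 \
  | yq w - spec.parameters.podname "$repo_pod" \
  | yq w --style=single -- - spec.parameters.backrest-opts --type=full \
  | kubectl -n version-service-31598 apply -f -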
.+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/fulldata-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + verify_applied_version cluster1 9.9.9 disabled 1.5.1-12 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=disabled + local db_version=1.5.1-12 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.uyYDdvyEKr ++ mktemp + local LAST_ERR=/tmp/tmp.hqq2g4CXep + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.uyYDdvyEKr + cat /tmp/tmp.hqq2g4CXep + rm /tmp/tmp.uyYDdvyEKr /tmp/tmp.hqq2g4CXep + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-disabled-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/1.5.1-12.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ...Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo + set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + desc 'Start the cluster up with version service images' + set +o xtrace ----------------------------------------------------------------------------------- Start the cluster up with version service images ----------------------------------------------------------------------------------- + PG_VER=12 + IMAGE_PGBOUNCER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + IMAGE_PG_HA=percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + IMAGE_BACKREST=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + IMAGE_BACKREST_REPO=percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + IMAGE_PGBADGER=percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w -i /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.labels.pgo-version 9.9.9 + yq w -i /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml spec.userLabels.pgo-version 9.9.9 + yq w -i /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml spec.upgradeOptions.versionServiceEndpoint http://version-service:11000 + yq w -i --style=single /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml spec.upgradeOptions.apply recommended + spinup_pgcluster cluster1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local cluster=cluster1 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + 
local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=version-service-31598 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets cluster1 version-service-31598 + local cluster=cluster1 + local namespace=version-service-31598 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + yq w - metadata.name cluster1-users-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.qPxC2GOJXk ++ mktemp + local LAST_ERR=/tmp/tmp.HeU5PPHh2J + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qPxC2GOJXk secret/cluster1-users-secret created + cat /tmp/tmp.HeU5PPHh2J + rm /tmp/tmp.qPxC2GOJXk /tmp/tmp.HeU5PPHh2J + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name cluster1-pmm-secret + kubectl_bin -n version-service-31598 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fKwLaMTlt5 ++ mktemp + local LAST_ERR=/tmp/tmp.zRlyJv2U7t + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.fKwLaMTlt5 secret/cluster1-pmm-secret configured + cat /tmp/tmp.zRlyJv2U7t + rm /tmp/tmp.fKwLaMTlt5 /tmp/tmp.zRlyJv2U7t + return 0 + kubectl_bin -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.B7wrebFJYE ++ mktemp + local LAST_ERR=/tmp/tmp.7SBF4qH1a9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.B7wrebFJYE deployment.apps/pg-client unchanged + cat /tmp/tmp.7SBF4qH1a9 + rm /tmp/tmp.B7wrebFJYE /tmp/tmp.7SBF4qH1a9 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml cluster1 false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml + local name=cluster1 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/conf/1.5.1_cr.yaml metadata.annotations.current-primary cluster1 + yq w - metadata.labels.crunchy-pgha-scope cluster1 + yq w - spec.name cluster1 + yq w - metadata.labels.name cluster1 + yq w - spec.keepData false + yq w - spec.user some-name + yq w - spec.database cluster1 + yq w - 
spec.keepBackups false + yq w - spec.secretsName cluster1-users-secret + yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger + yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha + yq w - spec.clustername cluster1 + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest + yq w - metadata.labels.pg-cluster cluster1 + yq w - metadata.name cluster1 + yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo + yq w - metadata.labels.deployment-name cluster1 + yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 ++ mktemp + local LAST_OUT=/tmp/tmp.0tqkzkrErB ++ mktemp + local LAST_ERR=/tmp/tmp.3ZNkQKcINi + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0tqkzkrErB perconapgcluster.pg.percona.com/cluster1 created + cat /tmp/tmp.3ZNkQKcINi + rm /tmp/tmp.0tqkzkrErB /tmp/tmp.3ZNkQKcINi + return 0 + wait_deployment cluster1-backrest-shared-repo + local name=cluster1-backrest-shared-repo + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1-backrest-shared-repo.... + wait_deployment cluster1 + local name=cluster1 + local target_namespace=version-service-31598 + sleep 10 + set +o xtrace cluster1...................... + [[ false == \g\c\s\+* ]] + wait_job_completion cluster1-stanza-create false + local job=cluster1-stanza-create + local mandatory=false + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 2 -ge 120 ]] + [[ 2 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 3 -ge 120 ]] + [[ 3 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 4 -ge 120 ]] + [[ 4 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 5 -ge 120 ]] + [[ 5 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
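(Stepping back to verify_applied_version above: with the release channel disabled, the test expects no image to have changed, so it lists every container image behind the pg-cluster=cluster1 selector and diffs the result against the expected list for 1.5.1-12. That check reduces to the two commands below; $tmpdir stands in for the /tmp/tmp.ZAvsDSImr9 scratch directory of this run, and the namespace flag is added for standalone use.)

# Collect the images actually deployed for the cluster, one per line, de-duplicated.
tmpdir=$(mktemp -d)
kubectl -n version-service-31598 get deployments,jobs --selector=pg-cluster=cluster1 \
    -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' \
  | sort -s -u > "$tmpdir/9.9.9-deployed-disabled-images.txt"

# An empty diff against the pinned expectation means nothing was upgraded.
diff -u "$tmpdir/9.9.9-deployed-disabled-images.txt" \
    e2e-tests/version-service/compare/1.5.1-12.txt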
(retry iterations 6 through 110 repeat the same one-second poll: KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}', followed by sleep 1 and echo -n ., incrementing retry by one each pass; a sketch of this wait loop appears after the loop finishes below)
.+ let retry+=1 + [[ 111 -ge 120 ]] + [[ 111 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 112 -ge 120 ]] + [[ 112 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 113 -ge 120 ]] + [[ 113 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 114 -ge 120 ]] + [[ 114 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 115 -ge 120 ]] + [[ 115 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 116 -ge 120 ]] + [[ 116 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 117 -ge 120 ]] + [[ 117 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 118 -ge 120 ]] + [[ 118 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 119 -ge 120 ]] + [[ 119 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/cluster1-stanza-create -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 120 -ge 120 ]] + [[ false == \t\r\u\e ]] + [[ 120 -ge 120 ]] + [[ false != \t\r\u\e ]] + echo Can not detect job. Passing by. Can not detect job. Passing by. 
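The loop traced above is the suite's generic poll-and-retry wait: query the job once per second, give up after 120 attempts, and, because the stanza-create job was requested with mandatory=false, merely print "Can not detect job. Passing by." when the cap is hit. What follows is a minimal sketch of that pattern, not the exact helper from the test suite; the namespace default and the 120-attempt cap come from the trace, while the final success check is reduced to a plain jsonpath query of .status.succeeded (the real helper routes its kubectl calls through the kubectl_bin wrapper shown further down).

# Minimal sketch of the poll-and-retry wait seen in the trace above.
wait_job_completion_sketch() {
    local job=$1
    local mandatory=${2:-true}
    local ns=${3:-version-service-31598}   # namespace used by this test run
    local retry=0

    # Poll until "kubectl get job" can return the object's name.
    while ! kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' >/dev/null 2>&1; do
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [[ $retry -ge 120 ]]; then
            [[ $mandatory == true ]] && { echo "job/$job never appeared" >&2; return 1; }
            echo 'Can not detect job. Passing by.'
            return 0
        fi
    done

    # Once the job exists, wait for .status.succeeded to report 1.
    # (No failure handling here; the sketch assumes the job eventually succeeds.)
    until [[ $(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}') == 1 ]]; do
        sleep 1
    done
}

Called as wait_job_completion_sketch cluster1-stanza-create false, it would behave like the invocation traced above: keep printing dots, then pass by once the retry budget is exhausted.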
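Almost every kubectl call in this log goes through a kubectl_bin wrapper: it captures stdout and stderr into mktemp files, retries the command up to three times with errexit temporarily disabled, then prints whatever it captured and cleans up. The reconstruction below is assembled from the trace rather than copied from the suite; the KUBECONFIG path is the one used throughout this run, and the back-off sleep between attempts is an assumption.

# Rough reconstruction of the kubectl_bin retry wrapper visible throughout this trace.
kubectl_bin_sketch() {
    local out err i exit_status=1
    out=$(mktemp)
    err=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        # Pin the kubeconfig used by this test run and capture both output streams.
        KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service \
            kubectl "$@" >"$out" 2>"$err"
        exit_status=$?
        [[ $exit_status == 0 ]] && break
        sleep "$i"    # assumed brief back-off between attempts
    done
    set -o errexit
    # The wrapper echoes the captured output (and any captured errors) before removing the temp files.
    cat "$out"
    cat "$err"
    rm -f "$out" "$err"
    return "$exit_status"
}

Capturing into temp files is what lets the suite retry transient API errors, such as the metrics.k8s.io discovery failures earlier in the run, without losing the command's output.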
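Stepping back to the start of this section: the cr.yaml that kubectl apply picks up from /tmp/tmp.ZAvsDSImr9 is produced by piping the base custom resource through a chain of yq v3 write edits that pin the cluster name, labels, secret, and the 1.5.1 image tags seen in the trace. Below is a condensed sketch of that pipeline using the same yq w syntax as the trace; the ${base_cr} variable is illustrative, since the path of the unmodified manifest is not visible in this part of the log.

# Condensed sketch of the CR customization traced at the top of this section.
base_cr=cr.yaml   # hypothetical path to the unmodified custom resource
cat "${base_cr}" \
    | yq w - metadata.name cluster1 \
    | yq w - metadata.labels.pg-cluster cluster1 \
    | yq w - metadata.labels.deployment-name cluster1 \
    | yq w - spec.clustername cluster1 \
    | yq w - spec.keepBackups false \
    | yq w - spec.secretsName cluster1-users-secret \
    | yq w - spec.pgPrimary.image percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha \
    | yq w - spec.backup.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest \
    | yq w - spec.backup.backrestRepoImage percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo \
    | yq w - spec.pgBouncer.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer \
    | yq w - spec.pgBadger.image percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger \
    | yq w - spec.pmm.image perconalab/pmm-client:2.41.0 \
    >/tmp/tmp.ZAvsDSImr9/cr.yaml
kubectl apply -f /tmp/tmp.ZAvsDSImr9/cr.yaml -n version-service-31598

Pinning explicit 1.5.1 images here is what lets the later verify_applied_version step diff the images actually deployed for cluster1 against the expected list for the recommended release channel.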
+ set -o xtrace + return 0 + wait_job_completion backrest-backup-cluster1 + local job=backrest-backup-cluster1 + local mandatory=true + local ns=version-service-31598 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' backrest-backup-cluster1+ retry=0 ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n version-service-31598 get job/backrest-backup-cluster1 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency cluster1 + cluster_name=cluster1 + wait_cluster_status cluster1 'pgcluster Initialized' + local cluster_name=cluster1 + local 'target_status=pgcluster Initialized' + local target_namespace=version-service-31598 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8keUFkQfcc +++ mktemp ++ local LAST_ERR=/tmp/tmp.daDvI6r332 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.8keUFkQfcc ++ cat /tmp/tmp.daDvI6r332 ++ rm /tmp/tmp.8keUFkQfcc /tmp/tmp.daDvI6r332 ++ return 0 + status_query='kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jTW5rdTEBF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ieRpYuTzOZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jTW5rdTEBF +++ cat /tmp/tmp.ieRpYuTzOZ +++ rm /tmp/tmp.jTW5rdTEBF /tmp/tmp.ieRpYuTzOZ +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ymjc89zRGP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iaZWbeXXvM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Ymjc89zRGP +++ cat 
/tmp/tmp.iaZWbeXXvM +++ rm /tmp/tmp.Ymjc89zRGP /tmp/tmp.iaZWbeXXvM +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3Mi9eqzzGo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GY1kz5bXQW +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.3Mi9eqzzGo +++ cat /tmp/tmp.GY1kz5bXQW +++ rm /tmp/tmp.3Mi9eqzzGo /tmp/tmp.GY1kz5bXQW +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qcfFxBhXhI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4hFq6FeNRT +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qcfFxBhXhI +++ cat /tmp/tmp.4hFq6FeNRT +++ rm /tmp/tmp.qcfFxBhXhI /tmp/tmp.4hFq6FeNRT +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7JaQQtooif +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dgC47KzmA2 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.7JaQQtooif ++++ cat /tmp/tmp.dgC47KzmA2 ++++ rm /tmp/tmp.7JaQQtooif /tmp/tmp.dgC47KzmA2 ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5oQVD4LEOl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FwciiPwmoj +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.5oQVD4LEOl +++ cat /tmp/tmp.FwciiPwmoj +++ rm /tmp/tmp.5oQVD4LEOl /tmp/tmp.FwciiPwmoj +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@cluster1.version-service-31598 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dlqC6E0kJw +++ mktemp ++ local LAST_ERR=/tmp/tmp.9Ph4DWxS1b ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.dlqC6E0kJw ++ cat /tmp/tmp.9Ph4DWxS1b ++ rm /tmp/tmp.dlqC6E0kJw /tmp/tmp.9Ph4DWxS1b ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.kTgKjGyIG3 ++ mktemp + local LAST_ERR=/tmp/tmp.HwcRoEoeqZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.kTgKjGyIG3 + cat /tmp/tmp.HwcRoEoeqZ + rm /tmp/tmp.kTgKjGyIG3 /tmp/tmp.HwcRoEoeqZ + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dyOmDaOrOq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.meKuePciuO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.dyOmDaOrOq +++ cat /tmp/tmp.meKuePciuO +++ rm /tmp/tmp.dyOmDaOrOq /tmp/tmp.meKuePciuO +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9wEIg6eNAW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZOlX8ty9Me +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9wEIg6eNAW +++ cat /tmp/tmp.ZOlX8ty9Me +++ rm /tmp/tmp.9wEIg6eNAW /tmp/tmp.ZOlX8ty9Me +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IJDMxhhpaT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.adSRhDQBJi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IJDMxhhpaT +++ cat 
/tmp/tmp.adSRhDQBJi +++ rm /tmp/tmp.IJDMxhhpaT /tmp/tmp.adSRhDQBJi +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vdaWNEiUwB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1ZbZw8RIGU ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.vdaWNEiUwB ++++ cat /tmp/tmp.1ZbZw8RIGU ++++ rm /tmp/tmp.vdaWNEiUwB /tmp/tmp.1ZbZw8RIGU ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1dAz3c3uOi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.J2kWLzmNIz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1dAz3c3uOi +++ cat /tmp/tmp.J2kWLzmNIz +++ rm /tmp/tmp.1dAz3c3uOi /tmp/tmp.J2kWLzmNIz +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gBFsaRChW6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zxbIMBi3Cf ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.gBFsaRChW6 ++ cat /tmp/tmp.zxbIMBi3Cf ++ rm /tmp/tmp.gBFsaRChW6 /tmp/tmp.zxbIMBi3Cf ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.wg8PD2Mb71 ++ mktemp + local LAST_ERR=/tmp/tmp.xfVLGFdLYR + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wg8PD2Mb71 + cat /tmp/tmp.xfVLGFdLYR + rm /tmp/tmp.wg8PD2Mb71 /tmp/tmp.xfVLGFdLYR + return 0 ++ get_psql_user_pass postgres cluster1 ++ local user=postgres ++ local cluster=cluster1 ++ local 
namespace=version-service-31598 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nIekQaABju ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nFbhtyBU9F +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.nIekQaABju +++ cat /tmp/tmp.nFbhtyBU9F +++ rm /tmp/tmp.nIekQaABju /tmp/tmp.nFbhtyBU9F +++ return 0 ++ [[ xcluster1 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iA27bRujdq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rm47KsOoJi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl get perconapgcluster/cluster1 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iA27bRujdq +++ cat /tmp/tmp.rm47KsOoJi +++ rm /tmp/tmp.iA27bRujdq /tmp/tmp.rm47KsOoJi +++ return 0 +++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ELJ6YqO3p1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3B2LCq2UmI +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ELJ6YqO3p1 +++ cat /tmp/tmp.3B2LCq2UmI +++ rm /tmp/tmp.ELJ6YqO3p1 /tmp/tmp.3B2LCq2UmI +++ return 0 ++ [[ xcluster1-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.4Sbnjvabsl +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UVYmzOVlyx ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++++ kubectl -n version-service-31598 get perconapgcluster/cluster1 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.4Sbnjvabsl ++++ cat /tmp/tmp.UVYmzOVlyx ++++ rm /tmp/tmp.4Sbnjvabsl /tmp/tmp.UVYmzOVlyx ++++ return 0 +++ kubectl_bin -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SgrgOVmqjR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W0v8mY6EjD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service +++ kubectl -n version-service-31598 get secret/cluster1-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.SgrgOVmqjR +++ cat /tmp/tmp.W0v8mY6EjD +++ rm /tmp/tmp.SgrgOVmqjR /tmp/tmp.W0v8mY6EjD +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE 
ON SCHEMA public TO "some-name";' postgres:postgres_password@cluster1.version-service-31598 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@cluster1.version-service-31598 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XO6PgGItK1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XNI5NCayIe ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl -n version-service-31598 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.XO6PgGItK1 ++ cat /tmp/tmp.XNI5NCayIe ++ rm /tmp/tmp.XO6PgGItK1 /tmp/tmp.XNI5NCayIe ++ return 0 + local client_container=pg-client-76df9df876-hdmb4 + kubectl_bin exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.uuTctUDr2T ++ mktemp + local LAST_ERR=/tmp/tmp.z0f6cUxHtk + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl exec pg-client-76df9df876-hdmb4 -n version-service-31598 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@cluster1.version-service-31598.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.uuTctUDr2T + cat /tmp/tmp.z0f6cUxHtk + rm /tmp/tmp.uuTctUDr2T /tmp/tmp.z0f6cUxHtk + return 0 + sleep 10 + verify_applied_version cluster1 9.9.9 recommended 14 + local cluster=cluster1 + local version_to_upgrade=9.9.9 + local release_channel=recommended + local db_version=14 + kubectl_bin get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + sort -s -u ++ mktemp + local LAST_OUT=/tmp/tmp.aE9GtGYOsK ++ mktemp + local LAST_ERR=/tmp/tmp.Zi4QNMwhdy + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl get deployments,jobs --selector=pg-cluster=cluster1 -o 'jsonpath={range .items[*]}{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}{end}' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.aE9GtGYOsK + cat /tmp/tmp.Zi4QNMwhdy + rm /tmp/tmp.aE9GtGYOsK /tmp/tmp.Zi4QNMwhdy + return 0 + diff -u /tmp/tmp.ZAvsDSImr9/9.9.9-deployed-recommended-images.txt /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/version-service/compare/14.txt + kubectl delete perconapgcluster/cluster1 perconapgcluster.pg.percona.com "cluster1" deleted + wait_for_delete deployment/cluster1 + local res=deployment/cluster1 + set +o xtrace deployment/cluster1 - ....Error from server (NotFound): deployments.apps "cluster1" not found + wait_for_delete deployment/cluster1-backrest-shared-repo + local res=deployment/cluster1-backrest-shared-repo 
+ set +o xtrace deployment/cluster1-backrest-shared-repo - Error from server (NotFound): deployments.apps "cluster1-backrest-shared-repo" not found + destroy version-service-31598 + grep -v level=info + grep -v level=debug ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp + grep -v 'Getting tasks for pod' + grep -v 'Getting pods from source' + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' ++ local LAST_OUT=/tmp/tmp.PMEaLfsg2v + tee /tmp/tmp.ZAvsDSImr9/operator.log + sort -u +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yk9JcFqJk6 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.PMEaLfsg2v ++ cat /tmp/tmp.Yk9JcFqJk6 ++ rm /tmp/tmp.PMEaLfsg2v /tmp/tmp.Yk9JcFqJk6 ++ return 0 + kubectl_bin logs postgres-operator-5f948d9c4f-7kkjr -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.yrAmbmx5jJ ++ mktemp + local LAST_ERR=/tmp/tmp.SeZTqpXAU1 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl logs postgres-operator-5f948d9c4f-7kkjr -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yrAmbmx5jJ + cat /tmp/tmp.SeZTqpXAU1 + rm /tmp/tmp.yrAmbmx5jJ /tmp/tmp.SeZTqpXAU1 + return 0 ] { } }, ] } }, { } }, ] { } }, { } ] } { } }, ] } ] { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ,{ ], { } }, }], { } }, }], } ], { } }, } ] ], } } ], }, }, { } }, { } }, }, 2024/04/23 13:48:21 INF 1 (localhost:4150) connecting to nsqd 2024/04/23 13:49:44 INF 2 (localhost:4150) connecting to nsqd 2024/04/23 13:49:45 INF 3 (localhost:4150) connecting to nsqd 2024/04/23 13:50:12 INF 4 (localhost:4150) connecting to nsqd 2024/04/23 13:50:40 INF 5 (localhost:4150) connecting to nsqd 2024/04/23 13:50:41 INF 6 (localhost:4150) connecting to nsqd 2024/04/23 13:50:41 INF 7 (localhost:4150) connecting to nsqd 2024/04/23 13:56:15 INF 8 (localhost:4150) connecting to nsqd 2024/04/23 13:57:47 INF 9 (localhost:4150) connecting to nsqd 2024/04/23 13:58:16 INF 10 (localhost:4150) connecting to nsqd 2024/04/23 13:58:18 INF 11 (localhost:4150) connecting to nsqd 2024/04/23 13:58:38 INF 12 (localhost:4150) connecting to nsqd 2024/04/23 13:58:44 INF 13 (localhost:4150) connecting to nsqd 2024/04/23 13:59:00 INF 14 (localhost:4150) connecting to nsqd 2024/04/23 14:00:14 INF 15 (localhost:4150) connecting to nsqd 2024/04/23 14:00:16 INF 16 (localhost:4150) connecting to nsqd 2024/04/23 14:00:34 INF 17 (localhost:4150) connecting to nsqd 2024/04/23 14:01:03 INF 18 (localhost:4150) connecting to nsqd 2024/04/23 14:01:03 INF 19 (localhost:4150) connecting to nsqd 2024/04/23 14:01:04 INF 20 (localhost:4150) connecting to nsqd 2024/04/23 14:10:09 INF 21 (localhost:4150) connecting to nsqd 2024/04/23 14:11:48 INF 22 (localhost:4150) connecting to nsqd 2024/04/23 14:12:19 INF 23 (localhost:4150) connecting to nsqd 2024/04/23 14:12:23 INF 24 (localhost:4150) connecting to nsqd 2024/04/23 14:12:48 INF 25 (localhost:4150) connecting to nsqd 2024/04/23 14:12:53 INF 26 (localhost:4150) connecting to nsqd 2024/04/23 14:13:07 INF 27 (localhost:4150) connecting to nsqd 2024/04/23 
14:13:54 INF 28 (localhost:4150) connecting to nsqd 2024/04/23 14:13:55 INF 29 (localhost:4150) connecting to nsqd 2024/04/23 14:14:02 INF 30 (localhost:4150) connecting to nsqd 2024/04/23 14:14:28 INF 31 (localhost:4150) connecting to nsqd 2024/04/23 14:14:29 INF 32 (localhost:4150) connecting to nsqd 2024/04/23 14:14:30 INF 33 (localhost:4150) connecting to nsqd 2024/04/23 14:21:24 INF 34 (localhost:4150) connecting to nsqd 2024/04/23 14:21:51 INF 35 (localhost:4150) connecting to nsqd 2024/04/23 14:21:55 INF 36 (localhost:4150) connecting to nsqd 2024/04/23 14:22:19 INF 37 (localhost:4150) connecting to nsqd 2024/04/23 14:22:24 INF 38 (localhost:4150) connecting to nsqd 2024/04/23 14:22:37 INF 39 (localhost:4150) connecting to nsqd 2024/04/23 14:23:23 INF 40 (localhost:4150) connecting to nsqd 2024/04/23 14:23:24 INF 41 (localhost:4150) connecting to nsqd 2024/04/23 14:23:30 INF 42 (localhost:4150) connecting to nsqd 2024/04/23 14:23:59 INF 43 (localhost:4150) connecting to nsqd 2024/04/23 14:24:00 INF 44 (localhost:4150) connecting to nsqd 2024/04/23 14:24:00 INF 45 (localhost:4150) connecting to nsqd 2024/04/23 14:28:12 INF 46 (localhost:4150) connecting to nsqd 2024/04/23 14:29:48 INF 47 (localhost:4150) connecting to nsqd 2024/04/23 14:30:15 INF 48 (localhost:4150) connecting to nsqd 2024/04/23 14:30:18 INF 49 (localhost:4150) connecting to nsqd 2024/04/23 14:30:38 INF 50 (localhost:4150) connecting to nsqd 2024/04/23 14:30:44 INF 51 (localhost:4150) connecting to nsqd 2024/04/23 14:31:00 INF 52 (localhost:4150) connecting to nsqd 2024/04/23 14:31:50 INF 53 (localhost:4150) connecting to nsqd 2024/04/23 14:31:51 INF 54 (localhost:4150) connecting to nsqd 2024/04/23 14:31:58 INF 55 (localhost:4150) connecting to nsqd 2024/04/23 14:32:26 INF 56 (localhost:4150) connecting to nsqd 2024/04/23 14:32:26 INF 57 (localhost:4150) connecting to nsqd 2024/04/23 14:32:27 INF 58 (localhost:4150) connecting to nsqd 2024/04/23 14:39:57 INF 59 (localhost:4150) connecting to nsqd 2024/04/23 14:41:33 INF 60 (localhost:4150) connecting to nsqd 2024/04/23 14:41:47 INF 61 (localhost:4150) connecting to nsqd 2024/04/23 14:41:49 INF 62 (localhost:4150) connecting to nsqd 2024/04/23 14:42:12 INF 63 (localhost:4150) connecting to nsqd 2024/04/23 14:42:17 INF 64 (localhost:4150) connecting to nsqd 2024/04/23 14:42:31 INF 65 (localhost:4150) connecting to nsqd 2024/04/23 14:43:13 INF 66 (localhost:4150) connecting to nsqd 2024/04/23 14:43:15 INF 67 (localhost:4150) connecting to nsqd 2024/04/23 14:43:22 INF 68 (localhost:4150) connecting to nsqd 2024/04/23 14:43:47 INF 69 (localhost:4150) connecting to nsqd 2024/04/23 14:43:48 INF 70 (localhost:4150) connecting to nsqd 2024/04/23 14:43:48 INF 71 (localhost:4150) connecting to nsqd 2024/04/23 14:51:26 INF 72 (localhost:4150) connecting to nsqd 2024/04/23 14:53:03 INF 73 (localhost:4150) connecting to nsqd 2024/04/23 14:53:18 INF 74 (localhost:4150) connecting to nsqd 2024/04/23 14:53:21 INF 75 (localhost:4150) connecting to nsqd 2024/04/23 14:53:45 INF 76 (localhost:4150) connecting to nsqd 2024/04/23 14:53:50 INF 77 (localhost:4150) connecting to nsqd 2024/04/23 14:54:03 INF 78 (localhost:4150) connecting to nsqd 2024/04/23 14:54:50 INF 79 (localhost:4150) connecting to nsqd 2024/04/23 14:54:51 INF 80 (localhost:4150) connecting to nsqd 2024/04/23 14:54:57 INF 81 (localhost:4150) connecting to nsqd 2024/04/23 14:55:24 INF 82 (localhost:4150) connecting to nsqd 2024/04/23 14:55:24 INF 83 (localhost:4150) connecting to nsqd 2024/04/23 14:55:25 INF 84 
(localhost:4150) connecting to nsqd 2024/04/23 14:58:42 INF 85 (localhost:4150) connecting to nsqd 2024/04/23 14:58:57 INF 86 (localhost:4150) connecting to nsqd 2024/04/23 14:59:01 INF 87 (localhost:4150) connecting to nsqd 2024/04/23 14:59:24 INF 88 (localhost:4150) connecting to nsqd 2024/04/23 14:59:29 INF 89 (localhost:4150) connecting to nsqd 2024/04/23 14:59:42 INF 90 (localhost:4150) connecting to nsqd 2024/04/23 15:00:32 INF 91 (localhost:4150) connecting to nsqd 2024/04/23 15:00:33 INF 92 (localhost:4150) connecting to nsqd 2024/04/23 15:00:40 INF 93 (localhost:4150) connecting to nsqd 2024/04/23 15:01:05 INF 94 (localhost:4150) connecting to nsqd 2024/04/23 15:01:06 INF 95 (localhost:4150) connecting to nsqd 2024/04/23 15:01:06 INF 96 (localhost:4150) connecting to nsqd 2024/04/23 15:06:54 INF 97 (localhost:4150) connecting to nsqd 2024/04/23 15:07:20 INF 98 (localhost:4150) connecting to nsqd 2024/04/23 15:07:22 INF 99 (localhost:4150) connecting to nsqd 2024/04/23 15:07:45 INF 100 (localhost:4150) connecting to nsqd 2024/04/23 15:07:50 INF 101 (localhost:4150) connecting to nsqd 2024/04/23 15:08:00 INF 102 (localhost:4150) connecting to nsqd 2024/04/23 15:09:06 INF 103 (localhost:4150) connecting to nsqd 2024/04/23 15:09:07 INF 104 (localhost:4150) connecting to nsqd 2024/04/23 15:09:14 INF 105 (localhost:4150) connecting to nsqd 2024/04/23 15:09:51 INF 106 (localhost:4150) connecting to nsqd 2024/04/23 15:09:51 INF 107 (localhost:4150) connecting to nsqd 2024/04/23 15:09:51 INF 108 (localhost:4150) connecting to nsqd 2024/04/23 15:13:39 INF 109 (localhost:4150) connecting to nsqd 2024/04/23 15:14:03 INF 110 (localhost:4150) connecting to nsqd 2024/04/23 15:14:09 INF 111 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "cluster1-pgbr-repo" "cluster1" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 10000, "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "200m", "cpu": "500m", "cpu": "500m", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin" "crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin" "crunchy-pgha-scope": "cluster1","pgo-version": "9.9.9","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin" "defaultMode": 288 "defaultMode": 288, "defaultMode": 288, "defaultMode": 420, "deployment-name": "cluster1" "deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1" "deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1" "deployment-name": "cluster1-repl1" "deployment-name": "cluster1-repl1","pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": 
"cluster1" "deployment-name": "cluster1-repl1","pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1" "deployment-name": "cluster1-repl2" "deployment-name": "cluster1-repl2","pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "env": [ { "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0423 13:47:53.398017 1 shared_informer.go:240] Waiting for caches to sync for namespace I0423 13:47:53.498524 1 shared_informer.go:247] Caches are synced for namespace I0423 13:47:53.616272 1 shared_informer.go:240] Waiting for caches to sync for version-service-31598 I0423 13:47:53.717466 1 shared_informer.go:247] Caches are synced for version-service-31598 "image": "perconalab/percona-postgresql-operator:main-ppg12-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg12-pgbadger", "image": "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg13-pgbadger", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbadger", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "image": "perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-rmdata", "image": "percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest", "image": "percona/percona-postgresql-operator:1.5.1-ppg12-pgbackrest-repo", "image": "percona/percona-postgresql-operator:1.5.1-ppg12-pgbadger", "image": "percona/percona-postgresql-operator:1.5.1-ppg12-pgbouncer", "image": "percona/percona-postgresql-operator:1.5.1-ppg12-postgres-ha", "image": "percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest", "image": "percona/percona-postgresql-operator:1.5.1-ppg13-pgbackrest-repo", "image": "percona/percona-postgresql-operator:1.5.1-ppg13-pgbadger", "image": "percona/percona-postgresql-operator:1.5.1-ppg13-pgbouncer", "image": "percona/percona-postgresql-operator:1.5.1-ppg13-postgres-ha", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 20, "initialDelaySeconds": 30, "items": [ "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "tls.crt", "key": "tls.key", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "limits": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "256Mi" "memory": "48Mi" "memory": "64Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": 
"/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgconf/tls", "mountPath": "/pgconf/tls/pgbouncer", "mountPath": "/pgconf/tls-replication", "mountPath": "/pgdata", "mountPath": "/pgdata", "mountPath": "/report", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "mountPath": "/tmp", "name": "backrest", "name": "backrest-backup-cluster1", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "BADGER_TARGET", "name": "cluster1", "name": "cluster1", "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo" "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo", "name": "cluster1-backrest-shared-repo", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer", "name": "cluster1-pgbouncer-cm" "name": "cluster1-pgbouncer-secret", "name": "cluster1-pgbouncer-secret", "name": "cluster1-pgbr-repo", "name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1" "name": "cluster1-pgha-config", "name": "cluster1-pgha-config", "name": "cluster1-repl1", "name": "cluster1-repl1", "name": "cluster1-repl2", "name": "cluster1-repl2", "name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1","pgo-version": "1.5.1" "name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2","pgo-version": "1.5.1" "name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2","pgo-version": "1.5.1" "name": "cluster1-replication-ssl-keypair", "name": "cluster1-rmdata-advl", "name": "cluster1-rmdata-advl", "name": "cluster1-rmdata-btzx", "name": "cluster1-rmdata-btzx", "name": "cluster1-rmdata-bzht", "name": "cluster1-rmdata-bzht", "name": "cluster1-rmdata-esky", "name": "cluster1-rmdata-esky", "name": "cluster1-rmdata-gnfn", "name": "cluster1-rmdata-gnfn", "name": "cluster1-rmdata-ivip", "name": "cluster1-rmdata-ivip", "name": "cluster1-rmdata-kxca", "name": "cluster1-rmdata-kxca", "name": "cluster1-rmdata-pznu", "name": "cluster1-rmdata-pznu", "name": "cluster1-rmdata-uylx", "name": "cluster1-rmdata-uylx", "name": "cluster1-ssl-ca" "name": "cluster1-ssl-ca" "name": "cluster1-ssl-keypair" "name": "cluster1-ssl-keypair" "name": "cluster1-stanza-create", "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "fulldata-cluster1", "name": "IS_BACKUP", "name": "IS_REPLICA", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": 
"PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbadger" "name": "pgbadger", "name": "PGBADGER_SERVICE_PORT", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "PG_CLUSTER", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_SCOPE", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "REMOVE_BACKUP", "name": "REMOVE_DATA", "name": "REPLICA_NAME", "name": "report", "name": "report", "name": "rmdata", "name": "root-volume" "name": "root-volume", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tls-pgbouncer" "name": "tls-pgbouncer", "name": "tls-replication" "name": "tls-replication", "name": "tls-server" "name": "tls-server", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "tmp" "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "path": "tls-replication.crt" "path": "tls-replication.key" "periodSeconds": 10 "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"cluster1"} "persistentVolumeClaim":{"claimName":"cluster1-repl1"} "persistentVolumeClaim":{"claimName":"cluster1-repl2"} "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pg-cluster": "cluster1" "pg-cluster": "cluster1", "pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1" "pg-cluster": "cluster1","deployment-name": "cluster1-repl1","pgo-version": "1.5.1","name": "cluster1-replica" "pg-cluster": "cluster1","deployment-name": "cluster1-repl2","pgo-version": "1.5.1","name": "cluster1-replica" "pg-cluster": "cluster1","deployment-name": "cluster1-repl2","pgo-version": "9.9.9","name": "cluster1-replica" "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgouser": 
"admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1" "pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1" "pgouser": "admin","crunchy-pgha-scope": "cluster1","pgo-version": "9.9.9","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1" "pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1" "pgo-version": "1.5.1","name": "cluster1","pg-cluster": "cluster1","deployment-name": "cluster1","pgouser": "admin","crunchy-pgha-scope": "cluster1" "pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1" "pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1" "pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2" "pgo-version": "1.5.1","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2" "pgo-version": "9.9.9","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1" "pgo-version": "9.9.9","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl1" "pgo-version": "9.9.9","name": "cluster1-replica","pg-cluster": "cluster1","deployment-name": "cluster1-repl2" "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "pgrmdata": "true", "pgrmdata": "true", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 10000 "port": 2022, "ports": [{ "ports": [{ "ports": [ { "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "privileged": false, "projected": { "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "replicas": 3, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "cluster1-backrest-repo-config" "secretName": "cluster1-backrest-repo-config", "secretName": "cluster1-backrest-repo-config" "secretName": "cluster1-backrest-repo-config", "secretName": "cluster1-postgres-secret" "secretName": "cluster1-primaryuser-secret" "secretName": "cluster1-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": { "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "serviceAccountName": "pgo-target", "service-name": "cluster1-backrest-shared-repo", "service-name": "cluster1-backrest-shared-repo", "service-name": "cluster1-pgbouncer", "service-name": "cluster1-pgbouncer", 
"service-name": "cluster1-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "2Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "sources": [ "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1Gi" "strategy": { "targetPort": 2022, "tcpSocket": { "template": { time="2024-04-23T13:47:53Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0 time="2024-04-23T13:48:25Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T13:48:30Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T13:49:44Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T13:49:44Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T13:50:41Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T13:50:41Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T13:50:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T13:50:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:50:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 
time="2024-04-23T13:51:54Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T13:51:54Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:51:54Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T13:53:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T13:53:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:53:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T13:54:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T13:54:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:54:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T13:55:58Z" level=error msg="command terminated with exit code 2" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:55:59Z" level=error msg="update perconapgcluster: check is pgcluster primary: no role labels in pod cluster1-6fd546ff4f-h8gwb" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:532" version=1.6.0 time="2024-04-23T13:55:59Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" 
func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T13:56:14Z" level=error msg="command terminated with exit code 1" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T13:56:14Z" level=warning msg="pg_ctl: PID file \"/pgdata/cluster1-repl1/postmaster.pid\" does not exist\nIs server running?\n" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.applyUpdateToPostgresInstance()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/rolling.go:177" version=1.6.0 time="2024-04-23T13:58:18Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:20Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:21Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:22Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:26Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:30Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:31Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:35Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:37Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:38Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T13:58:44Z" level=error msg="jobs.batch \"cluster1-rmdata-uylx\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T13:59:05Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T13:59:10Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:00:14Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:00:14Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:01:04Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:01:04Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:06:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:06:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" 
func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:06:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:07:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:07:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:07:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:09:59Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:10:09Z" level=error msg="command terminated with exit code 1" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:10:09Z" level=warning msg="pg_ctl: PID file \"/pgdata/cluster1-repl1/postmaster.pid\" does not exist\nIs server running?\n" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.applyUpdateToPostgresInstance()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/rolling.go:177" version=1.6.0 time="2024-04-23T14:12:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:25Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:26Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:27Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:28Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:35Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:39Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:40Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:45Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:47Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:48Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:12:53Z" level=error msg="jobs.batch \"cluster1-rmdata-pznu\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:13:11Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:13:16Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:13:54Z" level=error msg="Error completing 
workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:13:54Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:14:30Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:14:30Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:18:48Z" level=error msg="14-recommended value for spec.upgradeOptions.apply option is not supported" func="github.com/percona/percona-postgresql-operator/percona/controllers/version.EnsureVersion()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/version/version.go:48" version=1.6.0 time="2024-04-23T14:18:48Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:18:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:18:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:18:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:19:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:19:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:19:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" 
func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:20:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:20:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:20:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:21:55Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:21:57Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:21:58Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:21:59Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:00Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:01Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:06Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:11Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" 
file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:12Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:16Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:18Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:19Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:22:24Z" level=error msg="jobs.batch \"cluster1-rmdata-advl\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:22:41Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:22:46Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:23:23Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:23:23Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:24:01Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:24:01Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" 
func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:26:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:26:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:26:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:28:02Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:28:12Z" level=error msg="command terminated with exit code 1" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:28:12Z" level=warning msg="pg_ctl: PID file \"/pgdata/cluster1-repl2/postmaster.pid\" does not exist\nIs server running?\n" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.applyUpdateToPostgresInstance()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/rolling.go:177" version=1.6.0 time="2024-04-23T14:30:18Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:20Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:21Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:22Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:26Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:31Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:35Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:37Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:38Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:30:44Z" level=error msg="jobs.batch \"cluster1-rmdata-ivip\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:31:05Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:31:10Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:31:50Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:31:50Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:32:27Z" level=warning msg="crv1 pgreplica cluster1-repl1 is 
already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:32:27Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:38:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:38:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:38:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:39:45Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:39:56Z" level=error msg="command terminated with exit code 1" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:39:56Z" level=warning msg="pg_ctl: PID file \"/pgdata/cluster1-repl1/postmaster.pid\" does not exist\nIs server running?\n" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.applyUpdateToPostgresInstance()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/rolling.go:177" version=1.6.0 time="2024-04-23T14:41:49Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:41:50Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:41:51Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:41:52Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:41:53Z" level=error msg="ConfigMap Controller: cannot find pgcluster for configMap cluster1-pgha-config (namespace version-service-31598),ignoring" func="github.com/percona/percona-postgresql-operator/internal/controller/configmap.(*Controller).handleConfigMapSync()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/configmap/synchandler.go:55" version=1.6.0 time="2024-04-23T14:41:53Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:41:54Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:01Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:05Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:06Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:09Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:11Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:12Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:42:17Z" level=error msg="jobs.batch \"cluster1-rmdata-btzx\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:42:35Z" level=error 
msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:42:40Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:43:13Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:43:13Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:43:48Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:43:49Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:43:49Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:43:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T14:43:53Z" level=error msg="pod cluster1-repl2-98499b64-d6rgm does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:43:53Z" level=error msg="pod cluster1-repl2-98499b64-d6rgm does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T14:43:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:43:53Z" level=error 
msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T14:43:54Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T14:43:54Z" level=error msg="pod cluster1-repl2-98499b64-d6rgm does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:43:54Z" level=error msg="pod cluster1-repl2-98499b64-d6rgm does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T14:43:54Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:43:54Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T14:51:15Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:51:26Z" level=error msg="command terminated with exit code 1" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:51:26Z" level=warning msg="pg_ctl: PID file \"/pgdata/cluster1-repl1/postmaster.pid\" does not exist\nIs server running?\n" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.applyUpdateToPostgresInstance()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/rolling.go:177" version=1.6.0 time="2024-04-23T14:53:21Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:24Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not 
found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:25Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:26Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:27Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:33Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:37Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:38Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:42Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:44Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:45Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:53:50Z" level=error msg="jobs.batch \"cluster1-rmdata-bzht\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:54:07Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" 
func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:54:12Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:54:50Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T14:54:50Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T14:55:25Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:55:25Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T14:56:04Z" level=error msg="update perconapgcluster: ensure version failed to check version: version service apply: Get \"http://not-a-version-service/versions/v1/pg-operator/9.9.9/latest?customResourceUid=aee72fe5-b99c-47a8-a1de-9eb3c9b0d3a6&databaseVersion=13.12+-+Percona+Distribution\": dial tcp: lookup not-a-version-service on 10.230.224.10:53: no such host" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:501" version=1.6.0 time="2024-04-23T14:56:04Z" level=error msg="update perconapgcluster: scheduled update: add func to cron: empty spec string" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:509" version=1.6.0 time="2024-04-23T14:57:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T14:57:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T14:57:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" 
func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T14:59:01Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:02Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:03Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:04Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:05Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:06Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:07Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:12Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:16Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:17Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:21Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:24Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T14:59:29Z" level=error msg="jobs.batch \"cluster1-rmdata-esky\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T14:59:46Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T14:59:51Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T15:00:32Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T15:00:32Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T15:01:07Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T15:01:07Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T15:04:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).apply()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:255" version=1.6.0 time="2024-04-23T15:04:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" 
func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T15:04:53Z" level=error msg="unable to upgrade connection: container not found (\"database\")" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:177" version=1.6.0 time="2024-04-23T15:07:22Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:23Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:24Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:25Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:26Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:27Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:34Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:38Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:41Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:44Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:45Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:07:50Z" level=error msg="jobs.batch \"cluster1-rmdata-kxca\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T15:08:04Z" level=error msg="get deployment cluster1-repl1: deployments.apps \"cluster1-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T15:08:09Z" level=error msg="get deployment cluster1-repl2: deployments.apps \"cluster1-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T15:09:06Z" level=error msg="Error completing workflow cluster1-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T15:09:06Z" level=error msg="pgtasks.pg.percona.com \"cluster1-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T15:09:51Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T15:09:52Z" level=warning msg="crv1 pgreplica cluster1-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T15:09:52Z" level=warning msg="crv1 pgreplica cluster1-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T15:09:53Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T15:09:53Z" level=error msg="pod cluster1-repl1-6dff75b887-4m5xr does 
not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T15:09:53Z" level=error msg="pod cluster1-repl1-6dff75b887-4m5xr does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T15:09:53Z" level=error msg="pod cluster1-repl2-649dd5f676-hjzs6 does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T15:09:53Z" level=error msg="pod cluster1-repl2-649dd5f676-hjzs6 does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T15:13:40Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:42Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:43Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:44Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:45Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:46Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:51Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:55Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:13:56Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:14:00Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:14:02Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:14:03Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:14:04Z" level=error msg="pgclusters.pg.percona.com \"cluster1\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T15:14:09Z" level=error msg="jobs.batch \"cluster1-rmdata-gnfn\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "10000" "value": "5432" "value": "5432" "value": "/backrestrepo/cluster1-backrest-shared-repo" "value": "/backrestrepo/cluster1-backrest-shared-repo" "value": "backup" "value": "cluster1" "value": "cluster1" "value": "cluster1-backrest-shared-repo" "value": "cluster1-backrest-shared-repo-57f668d478-2jmng" "value": "cluster1-backrest-shared-repo-57f668d478-6zlbp" "value": "cluster1-backrest-shared-repo-5f647d77b5-94d7j" "value": "cluster1-backrest-shared-repo-5f647d77b5-mf4tj" "value": "cluster1-backrest-shared-repo-5f647d77b5-n6ml9" "value": "cluster1-backrest-shared-repo-65857c588c-9h2zp" "value": "cluster1-backrest-shared-repo-65857c588c-flx6w" "value": "cluster1-backrest-shared-repo-65857c588c-l8b5h" "value": "cluster1-backrest-shared-repo-65857c588c-pxr64" "value": "cluster1-backrest-shared-repo-85c794b5cb-5jzmn" "value": "cluster1-backrest-shared-repo-85c794b5cb-cq9x6" "value": "cluster1-backrest-shared-repo-85c794b5cb-l82r6" "value": "cluster1-backrest-shared-repo-85c794b5cb-wb66r" "value": "cluster1-backrest-shared-repo-cbc58f67f-nzcnx" "value": "cluster1-repl1" "value": "cluster1-repl2" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.70.201.16 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.22 
--db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.27 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.32 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.38 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.43 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.49 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.54 --db-path=/pgdata/cluster1" "value": " --db-host=10.70.201.59 --db-path=/pgdata/cluster1" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/cluster1" "value": "/pgdata/cluster1" "value": "/pgdata/cluster1-repl1" "value": "/pgdata/cluster1-repl1" "value": "/pgdata/cluster1-repl2" "value": "/pgdata/cluster1-repl2" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.70.200.25 --db-path=/pgdata/cluster1-repl1" "value": "--type=full --db-host=10.70.201.16 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.18 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.201.22 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.27 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.29 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.201.32 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.38 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.43 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.49 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.51 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.201.54 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.201.56 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.201.59 --db-path=/pgdata/cluster1" "value": "--type=full --db-host=10.70.202.30 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.202.52 --db-path=/pgdata/cluster1-repl2" "value": "--type=full --db-host=10.70.202.62 --db-path=/pgdata/cluster1" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumeMounts": [ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.CqySxHODsn ++ mktemp + local LAST_ERR=/tmp/tmp.XdNz32dem5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service + kubectl delete -f /tmp/tmp.ZAvsDSImr9/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat 
/tmp/tmp.CqySxHODsn
+ cat /tmp/tmp.XdNz32dem5
Error from server (NotFound): error when deleting "/tmp/tmp.ZAvsDSImr9/operator.yaml": serviceaccounts "pgo-deployer-sa" not found
Error from server (NotFound): error when deleting "/tmp/tmp.ZAvsDSImr9/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found
Error from server (NotFound): error when deleting "/tmp/tmp.ZAvsDSImr9/operator.yaml": configmaps "pgo-deployer-cm" not found
Error from server (NotFound): error when deleting "/tmp/tmp.ZAvsDSImr9/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found
Error from server (NotFound): error when deleting "/tmp/tmp.ZAvsDSImr9/operator.yaml": jobs.batch "pgo-deploy" not found
+ rm /tmp/tmp.CqySxHODsn /tmp/tmp.XdNz32dem5
+ return 1
+ true
+ yq w -d4 /tmp/tmp.ZAvsDSImr9/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall
+ kubectl_bin apply -f /tmp/tmp.ZAvsDSImr9/operator-destroy.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Sb0C0kz9t0
++ mktemp
+ local LAST_ERR=/tmp/tmp.f07qJK4wgi
+ local exit_status=0
+ local timeout=4
+ set +o errexit
++ seq 0 2
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl apply -f /tmp/tmp.ZAvsDSImr9/operator-destroy.yaml
+ exit_status=0
+ [[ 0 != 0 ]]
+ break
+ set -o errexit
+ cat /tmp/tmp.Sb0C0kz9t0
serviceaccount/pgo-deployer-sa created
clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created
configmap/pgo-deployer-cm created
clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created
job.batch/pgo-deploy created
+ cat /tmp/tmp.f07qJK4wgi
+ rm /tmp/tmp.Sb0C0kz9t0 /tmp/tmp.f07qJK4wgi
+ return 0
+ wait_job_completion pgo-deploy true
+ local job=pgo-deploy
+ local mandatory=true
+ local ns=version-service-31598
+ retry=0
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
pgo-deploy+ retry=0
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 1 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 2 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 3 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 4 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 5 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 6 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 7 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 8 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 9 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 10 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 11 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 12 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ '' == \1 ]]
+ sleep 1
+ echo -n .
.+ let retry+=1
+ '[' 13 -ge 600 ']'
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.metadata.name}'
++ grep -io 'not found'
+ [[ '' == \n\o\t\ \f\o\u\n\d ]]
++ kubectl_bin -n version-service-31598 get job/pgo-deploy -o 'jsonpath={.status.succeeded}'
+ [[ 1 == \1 ]]
+ echo
+ kubectl_bin delete -f /tmp/tmp.ZAvsDSImr9/operator-destroy.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.jYPHDb8uMA
++ mktemp
+ local LAST_ERR=/tmp/tmp.IRVrf9Of0t
+ local exit_status=0
+ local timeout=4
+ set +o errexit
++ seq 0 2
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete -f /tmp/tmp.ZAvsDSImr9/operator-destroy.yaml
+ exit_status=0
+ [[ 0 != 0 ]]
+ break
+ set -o errexit
+ cat /tmp/tmp.jYPHDb8uMA
serviceaccount "pgo-deployer-sa" deleted
clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted
configmap "pgo-deployer-cm" deleted
clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted
job.batch "pgo-deploy" deleted
+ cat /tmp/tmp.IRVrf9Of0t
+ rm /tmp/tmp.jYPHDb8uMA /tmp/tmp.IRVrf9Of0t
+ return 0
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|openshift|^NAME'
+ awk '{print$1}'
+ xargs kubectl delete ns --grace-period=0 --force=true
++ mktemp
+ local LAST_OUT=/tmp/tmp.5bQb8mjBYz
++ mktemp
+ local LAST_ERR=/tmp/tmp.jyCuUAxG49
+ local exit_status=0
+ local timeout=4
+ set +o errexit
++ seq 0 2
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl get ns
+ exit_status=0
+ [[ 0 != 0 ]]
+ break
+ set -o errexit
+ cat /tmp/tmp.5bQb8mjBYz
+ cat /tmp/tmp.jyCuUAxG49
+ rm /tmp/tmp.5bQb8mjBYz /tmp/tmp.jyCuUAxG49
+ return 0
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
namespace "version-service-31598" force deleted
+ kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr
++ mktemp
+ local LAST_OUT=/tmp/tmp.vZIGE1PR9n
++ mktemp
+ local LAST_ERR=/tmp/tmp.yXAfzSRFzu
+ local exit_status=0
+ local timeout=4
+ set +o errexit
++ seq 0 2
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 0
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 4
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 8
+ set -o errexit
+ cat /tmp/tmp.vZIGE1PR9n
+ cat /tmp/tmp.yXAfzSRFzu
Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found
Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found
+ rm /tmp/tmp.vZIGE1PR9n /tmp/tmp.yXAfzSRFzu
+ return 1
+ true
+ kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr
++ mktemp
+ local LAST_OUT=/tmp/tmp.5z9d11EnNp
++ mktemp
+ local LAST_ERR=/tmp/tmp.K205Xpp47s
+ local exit_status=0
+ local timeout=4
+ set +o errexit
++ seq 0 2
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 0
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 4
+ for i in '$(seq 0 2)'
+ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service
+ kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr
+ exit_status=1
+ [[ 1 != 0 ]]
+ sleep 8
+ set -o errexit
+ cat /tmp/tmp.5z9d11EnNp
+ cat /tmp/tmp.K205Xpp47s
Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found
Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found
+ rm /tmp/tmp.5z9d11EnNp /tmp/tmp.K205Xpp47s
+ return 1
+ true
+ rm -rf /tmp/tmp.ZAvsDSImr9
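
For reference, the wait_job_completion trace earlier in this section (the long run of dots) reduces to a poll-until-succeeded loop on the deployer Job. The sketch below is a reconstruction from that trace, not the actual e2e helper source; the job name, namespace, 1-second sleep and 600-attempt cap are taken from the trace, while the function and variable names are illustrative only.

# Reconstructed sketch of the polling pattern seen in the wait_job_completion trace.
wait_job_completion_sketch() {
    local job=${1:-pgo-deploy}              # job name as it appears in the trace
    local ns=${2:-version-service-31598}    # namespace as it appears in the trace
    local retry=0
    until [[ $(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
        # Give up early if the Job object itself is gone.
        if kubectl -n "$ns" get "job/$job" 2>&1 | grep -qio 'not found'; then
            return 1
        fi
        retry=$((retry + 1))
        if [ "$retry" -ge 600 ]; then       # same 600-iteration cap as the trace
            return 1
        fi
        sleep 1
        echo -n .                           # progress dots, as in the log
    done
    echo
}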
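
Likewise, every kubectl_bin call in this log follows the same capture-and-retry pattern: up to three kubectl attempts with 0/4/8-second back-off, stdout and stderr captured to mktemp files, and errexit toggled off around the loop. The following is a minimal sketch of that pattern, assuming the same KUBECONFIG path as the trace; the real helper lives in the e2e-tests functions library and may differ in detail.

# Reconstructed sketch of the kubectl_bin capture-and-retry pattern (not the real helper).
kubectl_bin_sketch() {
    local out err exit_status i
    out=$(mktemp)
    err=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-version-service \
            kubectl "$@" >"$out" 2>"$err"
        exit_status=$?
        [[ $exit_status == 0 ]] && break
        sleep $((i * 4))    # 0s, 4s, 8s between attempts, as seen in the trace
    done
    set -o errexit
    cat "$out"
    cat "$err" >&2
    rm "$out" "$err"
    return $exit_status
}

Callers that tolerate missing resources follow the call with || true, which is what the bare "+ true" lines after "+ return 1" in the trace correspond to.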