++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/ns-mode.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/ns-mode.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + main + create_namespace ns-mode-4621 + local namespace=ns-mode-4621 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + awk '{print$1}' + cleanup_rbac ++ mktemp + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.SImI9cADj4 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ZlMUDUgMr4 + local LAST_ERR=/tmp/tmp.jarOS1AGw4 + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.BCatzjnP6m + local exit_status=0 + local timeout=4 + set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SImI9cADj4 + cat /tmp/tmp.jarOS1AGw4 + rm /tmp/tmp.SImI9cADj4 /tmp/tmp.jarOS1AGw4 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.ZlMUDUgMr4 + cat /tmp/tmp.BCatzjnP6m Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.ZlMUDUgMr4 /tmp/tmp.BCatzjnP6m + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.8BcruXktlU ++ mktemp + local LAST_ERR=/tmp/tmp.b0dawOGj2v + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' 
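Every kubectl invocation in this trace goes through a kubectl_bin wrapper: stdout and stderr are captured into mktemp files, errexit is suspended, the command is retried up to three times with an increasing pause (0s, 4s, 8s), and the captured output is printed before the temp files are removed. A minimal sketch of that wrapper, reconstructed from the trace (only the behaviour visible above is reproduced; the exact function body is an assumption):

# Sketch of the retry wrapper seen throughout this trace (reconstruction, not the verbatim source).
# KUBECONFIG points at the per-job sandbox kubeconfig set in the surrounding CI environment.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        if [[ ${exit_status} != 0 ]]; then
            sleep $((timeout * i))          # 0s, 4s, 8s between attempts
        else
            break                           # success: stop retrying
        fi
    done
    set -o errexit
    cat "${LAST_OUT}"
    cat "${LAST_ERR}"
    rm "${LAST_OUT}" "${LAST_ERR}"
    return ${exit_status}
}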
+ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.8BcruXktlU + cat /tmp/tmp.b0dawOGj2v Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.8BcruXktlU /tmp/tmp.b0dawOGj2v + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.PdOMTX102D ++ mktemp + local LAST_ERR=/tmp/tmp.kFMNE2GQvG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.PdOMTX102D + cat /tmp/tmp.kFMNE2GQvG Error from server (NotFound): namespaces "ns-mode-4621" not found + rm /tmp/tmp.PdOMTX102D /tmp/tmp.kFMNE2GQvG + return 1 + : + wait_for_delete namespace/ns-mode-4621 + local res=namespace/ns-mode-4621 + set +o xtrace namespace/ns-mode-4621 - Error from server (NotFound): namespaces "ns-mode-4621" not found + kubectl_bin create namespace ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.w2K8SerxyH ++ mktemp + local LAST_ERR=/tmp/tmp.eww2eMMB7o + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.w2K8SerxyH namespace/ns-mode-4621 created + cat /tmp/tmp.eww2eMMB7o + rm /tmp/tmp.w2K8SerxyH /tmp/tmp.eww2eMMB7o + return 0 ++ kubectl_bin 
config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.ef16PJV1mc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ykWlWZLybZ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ef16PJV1mc ++ cat /tmp/tmp.ykWlWZLybZ ++ rm /tmp/tmp.ef16PJV1mc /tmp/tmp.ykWlWZLybZ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.UDfa2a2KRS ++ mktemp + local LAST_ERR=/tmp/tmp.Wsr1U7shq5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.UDfa2a2KRS Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. + cat /tmp/tmp.Wsr1U7shq5 + rm /tmp/tmp.UDfa2a2KRS /tmp/tmp.Wsr1U7shq5 + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install ns-mode-4621 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "ns-mode-4621"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "ns-mode-4621"#g' + cat /dev/fd/63 - + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f2 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w -d3 - 'subjects[0].namespace' ns-mode-4621 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace ns-mode-4621 ++ cat /tmp/tmp.QW8obiMqqX/operator.ini + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: 
"false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "ns-mode-4621" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "ns-mode-4621"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.DuCaDJGCD5 ++ mktemp + local LAST_ERR=/tmp/tmp.W1XVPH03QS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.DuCaDJGCD5 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.W1XVPH03QS + rm /tmp/tmp.DuCaDJGCD5 /tmp/tmp.W1XVPH03QS + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 35 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hMjaUwoID6 ++ mktemp + local LAST_ERR=/tmp/tmp.bGheRywSEq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hMjaUwoID6 serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.bGheRywSEq + rm /tmp/tmp.hMjaUwoID6 /tmp/tmp.bGheRywSEq + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TWEthqA78t +++ mktemp ++ local LAST_ERR=/tmp/tmp.SSRgkbIUFH ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.TWEthqA78t ++ cat /tmp/tmp.SSRgkbIUFH ++ rm /tmp/tmp.TWEthqA78t /tmp/tmp.SSRgkbIUFH ++ return 0 + wait_pod postgres-operator-858fb9644-8skrs + local pod=postgres-operator-858fb9644-8skrs + local ns= + set +o xtrace postgres-operator-858fb9644-8skrstrue + [[ -n '' ]] + desc 'create first PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PG cluster ----------------------------------------------------------------------------------- + cluster=some-name + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=ns-mode-4621 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name ns-mode-4621 + local cluster=some-name + local namespace=ns-mode-4621 + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n ns-mode-4621 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.bu2oWzHeXY ++ mktemp + local LAST_ERR=/tmp/tmp.ntzrwDUL1s + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.bu2oWzHeXY secret/some-name-users-secret created + 
cat /tmp/tmp.ntzrwDUL1s + rm /tmp/tmp.bu2oWzHeXY /tmp/tmp.ntzrwDUL1s + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n ns-mode-4621 apply -f - + yq w - metadata.name some-name-pmm-secret ++ mktemp + local LAST_OUT=/tmp/tmp.noK2uuVhO9 ++ mktemp + local LAST_ERR=/tmp/tmp.gNMy2KUW2r + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.noK2uuVhO9 secret/some-name-pmm-secret created + cat /tmp/tmp.gNMy2KUW2r + rm /tmp/tmp.noK2uuVhO9 /tmp/tmp.gNMy2KUW2r + return 0 + kubectl_bin -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.D8LfVMNa0W ++ mktemp + local LAST_ERR=/tmp/tmp.QyzIBJnRgo + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.D8LfVMNa0W deployment.apps/pg-client created + cat /tmp/tmp.QyzIBJnRgo + rm /tmp/tmp.D8LfVMNa0W /tmp/tmp.QyzIBJnRgo + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.labels.deployment-name some-name + yq w - spec.clustername some-name + yq w - spec.database some-name + yq w - metadata.name some-name + yq w - spec.name some-name + yq w - spec.keepData false + yq w - metadata.labels.pg-cluster some-name + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.keepBackups false + yq w - spec.user some-name + yq w - metadata.labels.name some-name + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.secretsName some-name-users-secret + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.zRNRFoYXAT ++ mktemp + local LAST_ERR=/tmp/tmp.8kSRwCCl3I + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zRNRFoYXAT 
perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.8kSRwCCl3I + rm /tmp/tmp.zRNRFoYXAT /tmp/tmp.8kSRwCCl3I + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
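Earlier in the trace, spinup_pgcluster templates the users and PMM secrets from deploy/users-secret.yaml with yq v3 before applying them into the test namespace. A sketch of that create_user_secrets step, reconstructed from the trace (${deploy_dir} is a stand-in for the workspace path shown above):

# Sketch of create_user_secrets as reconstructed from the spinup_pgcluster trace (not verbatim source).
create_user_secrets() {
    local cluster=$1
    local namespace=$2
    # Document 0 of users-secret.yaml: rename it per cluster and set the app user's password.
    yq r -d0 "${deploy_dir}/users-secret.yaml" \
        | yq w - metadata.name "${cluster}-users-secret" \
        | yq w - "stringData.${cluster}" "${cluster}_pass" \
        | kubectl_bin -n "${namespace}" apply -f -
    # Document 1 holds the PMM credentials; only the name is customised.
    yq r -d1 "${deploy_dir}/users-secret.yaml" \
        | yq w - metadata.name "${cluster}-pmm-secret" \
        | kubectl_bin -n "${namespace}" apply -f -
}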
.+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=ns-mode-4621 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DbMuvPCu9k +++ mktemp ++ local LAST_ERR=/tmp/tmp.qJ2IQFZ9gS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.DbMuvPCu9k ++ cat /tmp/tmp.qJ2IQFZ9gS ++ rm /tmp/tmp.DbMuvPCu9k /tmp/tmp.qJ2IQFZ9gS ++ return 0 + status_query='kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eXQsdp3HLj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3T4HdP7loR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.eXQsdp3HLj +++ cat /tmp/tmp.3T4HdP7loR +++ rm /tmp/tmp.eXQsdp3HLj /tmp/tmp.3T4HdP7loR +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ojfibd6BNT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3oPchNgm89 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Ojfibd6BNT +++ cat /tmp/tmp.3oPchNgm89 +++ rm /tmp/tmp.Ojfibd6BNT /tmp/tmp.3oPchNgm89 +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace 
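The readiness wait above keeps re-reading the custom resource status until it reports "pgcluster Initialized": a status query is built (its jsonpath depends on the pgo-version label, simplified here) and evaluated up to 24 times with a 10-second pause between attempts. A sketch of that wait_cluster_status loop, reconstructed from the trace:

# Sketch of wait_cluster_status as driven by wait_cluster_consistency above (reconstruction).
wait_cluster_status() {
    local cluster_name=$1
    local target_status=$2                     # e.g. "pgcluster Initialized"
    local target_namespace=$3                  # ns-mode-4621 in this run
    # The jsonpath varies with the operator version label; this is the branch seen in this trace.
    local status_query="kubectl_bin -n ${target_namespace} get perconapgcluster/${cluster_name} -o jsonpath='{.status.PGCluster.state}'"
    local retry=0
    until [[ "$(eval ${status_query})" == "${target_status}" ]]; do
        let retry+=1
        if [ ${retry} -ge 24 ]; then           # about four minutes of polling
            echo "cluster ${cluster_name} did not reach '${target_status}'"
            return 1
        fi
        echo 'waiting for cluster readiness'
        sleep 10
    done
}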
----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gyX1BU0vZR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CNMKVIqKIE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.gyX1BU0vZR +++ cat /tmp/tmp.CNMKVIqKIE +++ rm /tmp/tmp.gyX1BU0vZR /tmp/tmp.CNMKVIqKIE +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GesN08VJat ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DES8rOvv1U +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GesN08VJat +++ cat /tmp/tmp.DES8rOvv1U +++ rm /tmp/tmp.GesN08VJat /tmp/tmp.DES8rOvv1U +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GEvNuFEO6U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LMhhUA41uR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GEvNuFEO6U +++ cat /tmp/tmp.LMhhUA41uR +++ rm /tmp/tmp.GEvNuFEO6U /tmp/tmp.LMhhUA41uR +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.YzmRvjNVwQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EsftoI3k0e ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.YzmRvjNVwQ ++++ cat /tmp/tmp.EsftoI3k0e ++++ rm /tmp/tmp.YzmRvjNVwQ /tmp/tmp.EsftoI3k0e ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.II1lVmCa2D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LK0yHsOcs5 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.II1lVmCa2D +++ cat 
/tmp/tmp.LK0yHsOcs5 +++ rm /tmp/tmp.II1lVmCa2D /tmp/tmp.LK0yHsOcs5 +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AHJTzZanGf +++ mktemp ++ local LAST_ERR=/tmp/tmp.iHccUhLeLR ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.AHJTzZanGf ++ cat /tmp/tmp.iHccUhLeLR ++ rm /tmp/tmp.AHJTzZanGf /tmp/tmp.iHccUhLeLR ++ return 0 + local client_container=pg-client-76df9df876-f7tzk + kubectl_bin exec pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Xju2fT1RQM ++ mktemp + local LAST_ERR=/tmp/tmp.cFAUaLoEuQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Xju2fT1RQM + cat /tmp/tmp.cFAUaLoEuQ + rm /tmp/tmp.Xju2fT1RQM /tmp/tmp.cFAUaLoEuQ + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.quO8hprFG2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l3v5tU7Z0W +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.quO8hprFG2 +++ cat /tmp/tmp.l3v5tU7Z0W +++ rm /tmp/tmp.quO8hprFG2 /tmp/tmp.l3v5tU7Z0W +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sohuFWN2au ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iXtKijszrZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.sohuFWN2au +++ 
cat /tmp/tmp.iXtKijszrZ +++ rm /tmp/tmp.sohuFWN2au /tmp/tmp.iXtKijszrZ +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ujCtwOMa35 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ytCtTe83NB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ujCtwOMa35 +++ cat /tmp/tmp.ytCtTe83NB +++ rm /tmp/tmp.ujCtwOMa35 /tmp/tmp.ytCtTe83NB +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tcr4hERAVh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.oMzXgxerL8 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.tcr4hERAVh ++++ cat /tmp/tmp.oMzXgxerL8 ++++ rm /tmp/tmp.tcr4hERAVh /tmp/tmp.oMzXgxerL8 ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xHZr1aG9xP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3C8LgCopg5 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.xHZr1aG9xP +++ cat /tmp/tmp.3C8LgCopg5 +++ rm /tmp/tmp.xHZr1aG9xP /tmp/tmp.3C8LgCopg5 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6aaKSyqyP4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mYnZnoWsXB ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6aaKSyqyP4 ++ cat /tmp/tmp.mYnZnoWsXB ++ rm /tmp/tmp.6aaKSyqyP4 /tmp/tmp.mYnZnoWsXB ++ return 0 + local client_container=pg-client-76df9df876-f7tzk + kubectl_bin exec pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.prCIWtWD14 ++ mktemp + local LAST_ERR=/tmp/tmp.HjV5b2xZ7Z + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec 
pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.prCIWtWD14 + cat /tmp/tmp.HjV5b2xZ7Z + rm /tmp/tmp.prCIWtWD14 /tmp/tmp.HjV5b2xZ7Z + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xbHHVUqe2u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gfbSFCcYju +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.xbHHVUqe2u +++ cat /tmp/tmp.gfbSFCcYju +++ rm /tmp/tmp.xbHHVUqe2u /tmp/tmp.gfbSFCcYju +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hlM3gnwYOx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0KULPGMSsf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.hlM3gnwYOx +++ cat /tmp/tmp.0KULPGMSsf +++ rm /tmp/tmp.hlM3gnwYOx /tmp/tmp.0KULPGMSsf +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.W20xxw0PC3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aVxy2Y9LdT +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.W20xxw0PC3 +++ cat /tmp/tmp.aVxy2Y9LdT +++ rm /tmp/tmp.W20xxw0PC3 /tmp/tmp.aVxy2Y9LdT +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3JPfe8iv26 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gSCc0X61Lt ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.3JPfe8iv26 ++++ cat /tmp/tmp.gSCc0X61Lt ++++ rm /tmp/tmp.3JPfe8iv26 /tmp/tmp.gSCc0X61Lt ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TYYXGOevvz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.19OASdeC02 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ 
KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TYYXGOevvz +++ cat /tmp/tmp.19OASdeC02 +++ rm /tmp/tmp.TYYXGOevvz /tmp/tmp.19OASdeC02 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CK13RP04gK +++ mktemp ++ local LAST_ERR=/tmp/tmp.HRYFq2cNIc ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.CK13RP04gK ++ cat /tmp/tmp.HRYFq2cNIc ++ rm /tmp/tmp.CK13RP04gK /tmp/tmp.HRYFq2cNIc ++ return 0 + local client_container=pg-client-76df9df876-f7tzk + kubectl_bin exec pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.a2iv7lMmHO ++ mktemp + local LAST_ERR=/tmp/tmp.W5gYFiNmNo + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-f7tzk -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.a2iv7lMmHO + cat /tmp/tmp.W5gYFiNmNo + rm /tmp/tmp.a2iv7lMmHO /tmp/tmp.W5gYFiNmNo + return 0 + sleep 10 + destroy_operator + kubectl_bin delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.mvY9QdzMa1 ++ mktemp + local LAST_ERR=/tmp/tmp.dKUzLXb4FI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.mvY9QdzMa1 + cat /tmp/tmp.dKUzLXb4FI Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from 
server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.mvY9QdzMa1 /tmp/tmp.dKUzLXb4FI + return 1 + true + yq w -d4 /tmp/tmp.QW8obiMqqX/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.d335YHa5WZ ++ mktemp + local LAST_ERR=/tmp/tmp.KZ0G4Fa79Z + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.d335YHa5WZ serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.KZ0G4Fa79Z + rm /tmp/tmp.d335YHa5WZ /tmp/tmp.KZ0G4Fa79Z + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
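# The pgo-deploy polling above, like every kubectl call in this log, goes through a
# retry wrapper: stdout/stderr are captured in mktemp files and the command is retried
# up to three times with a growing sleep. A minimal sketch of that wrapper, reconstructed
# from the trace (the name kubectl_bin matches the log; the real helper in the e2e-tests
# functions file may handle errexit and logging slightly differently):
kubectl_bin() {
    local out err i
    out=$(mktemp) err=$(mktemp)
    for i in $(seq 0 2); do
        if kubectl "$@" >"$out" 2>"$err"; then
            cat "$out" "$err"
            rm -f "$out" "$err"
            return 0
        fi
        sleep $((i * 4))    # 0s, 4s, 8s between attempts, as seen in the trace
    done
    cat "$out" "$err"
    rm -f "$out" "$err"
    return 1
}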
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.9iuGTshJWt ++ mktemp + local LAST_ERR=/tmp/tmp.Q2PSbyH2fY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9iuGTshJWt serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.Q2PSbyH2fY + rm /tmp/tmp.9iuGTshJWt /tmp/tmp.Q2PSbyH2fY + return 0 + desc 'dynamic operator mode. Should create namespace for cluster by itself' + set +o xtrace ----------------------------------------------------------------------------------- dynamic operator mode. 
Should create namespace for cluster by itself ----------------------------------------------------------------------------------- + ns=ns-mode-21097 + create_namespace ns-mode-21097 + local namespace=ns-mode-21097 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + cleanup_rbac ++ mktemp + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.p0a4QDzeo1 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.KiQLhIbR4D + local LAST_OUT=/tmp/tmp.FiFNbIl15s + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + local LAST_ERR=/tmp/tmp.Y63PdiI7EB + local exit_status=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.p0a4QDzeo1 + cat /tmp/tmp.KiQLhIbR4D + rm /tmp/tmp.p0a4QDzeo1 /tmp/tmp.KiQLhIbR4D + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level namespace "ns-mode-4621" deleted + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.FiFNbIl15s + cat /tmp/tmp.Y63PdiI7EB Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.FiFNbIl15s /tmp/tmp.Y63PdiI7EB + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.iBFqcV5Bmg ++ mktemp + local LAST_ERR=/tmp/tmp.uBp8KBlOoV + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.iBFqcV5Bmg + cat 
/tmp/tmp.uBp8KBlOoV Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.iBFqcV5Bmg /tmp/tmp.uBp8KBlOoV + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace ns-mode-21097 ++ mktemp + local LAST_OUT=/tmp/tmp.ybWISAQVEk ++ mktemp + local LAST_ERR=/tmp/tmp.Q9BQya1BVW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-21097 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-21097 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-21097 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.ybWISAQVEk + cat /tmp/tmp.Q9BQya1BVW Error from server (NotFound): namespaces "ns-mode-21097" not found + rm /tmp/tmp.ybWISAQVEk /tmp/tmp.Q9BQya1BVW + return 1 + : + wait_for_delete namespace/ns-mode-21097 + local res=namespace/ns-mode-21097 + set +o xtrace namespace/ns-mode-21097 - Error from server (NotFound): namespaces "ns-mode-21097" not found + kubectl_bin create namespace ns-mode-21097 ++ mktemp + local LAST_OUT=/tmp/tmp.HaoXjn7YeM ++ mktemp + local LAST_ERR=/tmp/tmp.eG6czGHS3X + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace ns-mode-21097 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.HaoXjn7YeM namespace/ns-mode-21097 created + cat /tmp/tmp.eG6czGHS3X + rm /tmp/tmp.HaoXjn7YeM /tmp/tmp.eG6czGHS3X + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KDQ0ZMwLW7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YRi4GNO72U ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.KDQ0ZMwLW7 ++ cat /tmp/tmp.YRi4GNO72U ++ rm /tmp/tmp.KDQ0ZMwLW7 /tmp/tmp.YRi4GNO72U ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-21097 ++ mktemp + local LAST_OUT=/tmp/tmp.zVwxFFis9Z ++ mktemp + local LAST_ERR=/tmp/tmp.NYh5Taue7L + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config 
set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-21097 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zVwxFFis9Z Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. + cat /tmp/tmp.NYh5Taue7L + rm /tmp/tmp.zVwxFFis9Z /tmp/tmp.NYh5Taue7L + return 0 + export OPERATOR_NS=ns-mode-21097 + OPERATOR_NS=ns-mode-21097 + deploy_operator '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml dynamic + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=dynamic + local action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml dynamic install ns-mode-4621 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=dynamic + local operator_action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "ns-mode-4621"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "dynamic"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "ns-mode-21097"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f1 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace ns-mode-21097 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w -d3 - 'subjects[0].namespace' ns-mode-21097 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.QW8obiMqqX/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "ns-mode-4621" namespace_mode: "dynamic" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "ns-mode-21097"' + [[ -n '' ]] + kubectl_bin apply -n ns-mode-21097 -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nfsPLGFXVL ++ mktemp + local LAST_ERR=/tmp/tmp.t5bvT6NnZI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -n ns-mode-21097 -f 
/tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nfsPLGFXVL serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.t5bvT6NnZI + rm /tmp/tmp.nfsPLGFXVL /tmp/tmp.t5bvT6NnZI + return 0 + wait_job_completion pgo-deploy true ns-mode-21097 + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-21097 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
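# deploy_operator above re-renders deploy/operator.yaml for dynamic namespace mode: the
# values.yaml payload inside the deployer ConfigMap is rewritten with sed, then the yq
# writes patch the namespaces, image and install action before the manifest is applied
# into the separate operator namespace. A condensed sketch of that rewrite, using the
# yq v3 read/write syntax seen in the trace (paths and values copied from the log, the
# variable name is illustrative):
values=$(yq r -d2 deploy/operator.yaml 'data[values.yaml]' \
    | sed -e 's#^namespace: .*#namespace: "ns-mode-4621"#g' \
    | sed -e 's#namespace_mode: .*#namespace_mode: "dynamic"#g' \
    | sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "ns-mode-21097"#g')
yq w -d'*' deploy/operator.yaml metadata.namespace ns-mode-21097 \
    | yq w -d3 - 'subjects[0].namespace' ns-mode-21097 \
    | yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install \
    | yq w -d2 - 'data[values.yaml]' "$values" \
    | kubectl apply -n ns-mode-21097 -f -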
.+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
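# The long run of dots above is wait_job_completion polling the pgo-deploy job once per
# second until .status.succeeded reaches 1, giving up after 600 iterations. A minimal
# sketch of that loop as it appears in the trace (the real helper also checks for a
# vanished job via the 'not found' grep and honours a mandatory flag):
wait_job_completion() {
    local job=$1 ns=$2 retry=0
    until [[ $(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [ "$retry" -ge 600 ]; then
            return 1    # give up after ~10 minutes, matching the '-ge 600' checks above
        fi
    done
    echo
}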
.+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -n ns-mode-21097 -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.f6t1pbxpN1 ++ mktemp + local LAST_ERR=/tmp/tmp.7pa16kjTaB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -n ns-mode-21097 -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.f6t1pbxpN1 serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.7pa16kjTaB Warning: deleting cluster-scoped resources, not scoped to the provided namespace + rm /tmp/tmp.f6t1pbxpN1 /tmp/tmp.7pa16kjTaB + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qul9zYQaXJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.TM5aFDQGuY ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Qul9zYQaXJ ++ cat /tmp/tmp.TM5aFDQGuY ++ rm /tmp/tmp.Qul9zYQaXJ /tmp/tmp.TM5aFDQGuY ++ return 0 + wait_pod postgres-operator-858fb9644-p97n9 ns-mode-21097 + local pod=postgres-operator-858fb9644-p97n9 + local ns=ns-mode-21097 + set +o xtrace postgres-operator-858fb9644-p97n9true + [[ -n '' ]] + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=ns-mode-4621 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster 
----------------------------------------------------------------------------------- + create_user_secrets some-name ns-mode-4621 + local cluster=some-name + local namespace=ns-mode-4621 + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n ns-mode-4621 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iC31RHOB4a ++ mktemp + local LAST_ERR=/tmp/tmp.Iy7gOaTCNA + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.iC31RHOB4a secret/some-name-users-secret created + cat /tmp/tmp.Iy7gOaTCNA + rm /tmp/tmp.iC31RHOB4a /tmp/tmp.Iy7gOaTCNA + return 0 + yq w - metadata.name some-name-pmm-secret + kubectl_bin -n ns-mode-4621 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.bxkowh6iIh ++ mktemp + local LAST_ERR=/tmp/tmp.d2RzeDptm9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.bxkowh6iIh secret/some-name-pmm-secret created + cat /tmp/tmp.d2RzeDptm9 + rm /tmp/tmp.bxkowh6iIh /tmp/tmp.d2RzeDptm9 + return 0 + kubectl_bin -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.lJtlBxip3b ++ mktemp + local LAST_ERR=/tmp/tmp.2j7Zg1Eg51 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lJtlBxip3b deployment.apps/pg-client created + cat /tmp/tmp.2j7Zg1Eg51 + rm /tmp/tmp.lJtlBxip3b /tmp/tmp.2j7Zg1Eg51 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.name some-name + yq w - spec.clustername some-name + yq w - spec.database some-name + yq w - metadata.labels.pg-cluster some-name + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - metadata.labels.name some-name + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - metadata.labels.deployment-name some-name + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.name some-name + yq w - spec.keepBackups false + yq w - spec.user some-name + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pgBadger.image 
perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.keepData false + yq w - spec.secretsName some-name-users-secret + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.eeXW62rimm ++ mktemp + local LAST_ERR=/tmp/tmp.wSnHHxq4zT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eeXW62rimm perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.wSnHHxq4zT + rm /tmp/tmp.eeXW62rimm /tmp/tmp.wSnHHxq4zT + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
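# spinup_pgcluster above prepares the cluster CR by chaining yq writes over deploy/cr.yaml
# (name, labels, images, user, secretsName, keepData/keepBackups) and then applies the
# result into the watched namespace. A shortened sketch of that chain with only a few of
# the fields traced above (the temporary file path is illustrative; the harness writes
# into its own mktemp directory):
yq w deploy/cr.yaml metadata.name some-name \
    | yq w - spec.clustername some-name \
    | yq w - spec.database some-name \
    | yq w - spec.user some-name \
    | yq w - spec.secretsName some-name-users-secret \
    | yq w - spec.keepData false \
    | yq w - spec.keepBackups false \
    > /tmp/cr.yaml
kubectl apply -f /tmp/cr.yaml -n ns-mode-4621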
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
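# The "write data" steps further down (CREATE DATABASE/TABLE, INSERT, GRANT) all go
# through the same run_psql pattern: read the postgres password from the cluster users
# secret, find the pg-client pod, and exec psql inside it against the cluster service.
# A simplified, assumption-labeled sketch of that flow (the helper in this log pipes a
# printf through psql instead of using -c, but the effect is the same):
pass=$(kubectl -n ns-mode-4621 get secret/some-name-users-secret \
    -o 'jsonpath={.data.postgres}' | base64 -d)
client=$(kubectl -n ns-mode-4621 get pods --selector=name=pg-client \
    -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -n ns-mode-4621 -- \
    psql -v ON_ERROR_STOP=1 -t -q \
    "postgres://postgres:${pass}@some-name.ns-mode-4621.svc.cluster.local/myapp" \
    -c 'INSERT INTO myApp (id) VALUES (100500)'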
.+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=ns-mode-4621 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BtYop7WFUy +++ mktemp ++ local LAST_ERR=/tmp/tmp.1dlPMu75Zx ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 0 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 4 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 8 ++ set -o errexit ++ cat /tmp/tmp.BtYop7WFUy ++ cat /tmp/tmp.1dlPMu75Zx Error from server (NotFound): perconapgclusters.pg.percona.com "some-name" not found ++ rm /tmp/tmp.BtYop7WFUy /tmp/tmp.1dlPMu75Zx ++ return 1 + status_query='kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UPVmtZQ542 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1ABGlBdKNF +++ local 
exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UPVmtZQ542 +++ cat /tmp/tmp.1ABGlBdKNF +++ rm /tmp/tmp.UPVmtZQ542 /tmp/tmp.1ABGlBdKNF +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E8gs2ykvDj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YEW4droFIz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.E8gs2ykvDj +++ cat /tmp/tmp.YEW4droFIz +++ rm /tmp/tmp.E8gs2ykvDj /tmp/tmp.YEW4droFIz +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AhM1My5L5t ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MBzTRoyTCI +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 0 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 4 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 8 +++ set -o errexit +++ cat /tmp/tmp.AhM1My5L5t +++ cat /tmp/tmp.MBzTRoyTCI Error from server (NotFound): perconapgclusters.pg.percona.com "some-name" not found +++ rm /tmp/tmp.AhM1My5L5t /tmp/tmp.MBzTRoyTCI +++ return 1 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AaFpFUpcBi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dXaH362ddw +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.AaFpFUpcBi +++ cat /tmp/tmp.dXaH362ddw +++ rm /tmp/tmp.AaFpFUpcBi /tmp/tmp.dXaH362ddw +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.m7QGspPxxg 
+++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dA5s2q7AIa ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.m7QGspPxxg ++++ cat /tmp/tmp.dA5s2q7AIa ++++ rm /tmp/tmp.m7QGspPxxg /tmp/tmp.dA5s2q7AIa ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7z4fOsouDQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.59rEGhnmQE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.7z4fOsouDQ +++ cat /tmp/tmp.59rEGhnmQE +++ rm /tmp/tmp.7z4fOsouDQ /tmp/tmp.59rEGhnmQE +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nokAp7kGGJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.IASuPhtvzJ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.nokAp7kGGJ ++ cat /tmp/tmp.IASuPhtvzJ ++ rm /tmp/tmp.nokAp7kGGJ /tmp/tmp.IASuPhtvzJ ++ return 0 + local client_container=pg-client-76df9df876-jdws6 + kubectl_bin exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.H9ISSoD9iZ ++ mktemp + local LAST_ERR=/tmp/tmp.wnOIL4TtuF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.H9ISSoD9iZ + cat /tmp/tmp.wnOIL4TtuF + rm /tmp/tmp.H9ISSoD9iZ /tmp/tmp.wnOIL4TtuF + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iWeR2xKlez ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RoNr85yOtu +++ local exit_status=0 +++ local timeout=4 +++ 
set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iWeR2xKlez +++ cat /tmp/tmp.RoNr85yOtu +++ rm /tmp/tmp.iWeR2xKlez /tmp/tmp.RoNr85yOtu +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EfX2AX4DBf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MJ1HnKaqhR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 0 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 4 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 8 +++ set -o errexit +++ cat /tmp/tmp.EfX2AX4DBf +++ cat /tmp/tmp.MJ1HnKaqhR Error from server (NotFound): perconapgclusters.pg.percona.com "some-name" not found +++ rm /tmp/tmp.EfX2AX4DBf /tmp/tmp.MJ1HnKaqhR +++ return 1 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YGAZseEsCn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O6EEJodDgA +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.YGAZseEsCn +++ cat /tmp/tmp.O6EEJodDgA +++ rm /tmp/tmp.YGAZseEsCn /tmp/tmp.O6EEJodDgA +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sIZknmJ6y6 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.B3x1KAhNZM ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.sIZknmJ6y6 ++++ cat /tmp/tmp.B3x1KAhNZM ++++ rm /tmp/tmp.sIZknmJ6y6 /tmp/tmp.B3x1KAhNZM ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QXUEbVGtTI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KPTuiKW2IP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QXUEbVGtTI +++ cat /tmp/tmp.KPTuiKW2IP +++ rm /tmp/tmp.QXUEbVGtTI 
/tmp/tmp.KPTuiKW2IP +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.saXDOMPNKH +++ mktemp ++ local LAST_ERR=/tmp/tmp.NLFkl6U6IA ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.saXDOMPNKH ++ cat /tmp/tmp.NLFkl6U6IA ++ rm /tmp/tmp.saXDOMPNKH /tmp/tmp.NLFkl6U6IA ++ return 0 + local client_container=pg-client-76df9df876-jdws6 + kubectl_bin exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.8q9n6gY5vA ++ mktemp + local LAST_ERR=/tmp/tmp.wUiEswhBRl + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.8q9n6gY5vA + cat /tmp/tmp.wUiEswhBRl + rm /tmp/tmp.8q9n6gY5vA /tmp/tmp.wUiEswhBRl + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qh7zyfkNNB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aTyxDx52fF +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Qh7zyfkNNB +++ cat /tmp/tmp.aTyxDx52fF +++ rm /tmp/tmp.Qh7zyfkNNB /tmp/tmp.aTyxDx52fF +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o0pE2xag3O ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iArbQNNbOB +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 0 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 4 +++ for i in '$(seq 0 2)' +++ 
KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=1 +++ [[ 1 != 0 ]] +++ sleep 8 +++ set -o errexit +++ cat /tmp/tmp.o0pE2xag3O +++ cat /tmp/tmp.iArbQNNbOB Error from server (NotFound): perconapgclusters.pg.percona.com "some-name" not found +++ rm /tmp/tmp.o0pE2xag3O /tmp/tmp.iArbQNNbOB +++ return 1 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pExMAaVPic ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sQU8BxigwX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.pExMAaVPic +++ cat /tmp/tmp.sQU8BxigwX +++ rm /tmp/tmp.pExMAaVPic /tmp/tmp.sQU8BxigwX +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Kpwp0Bukmg +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SzJkWpmSpc ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.Kpwp0Bukmg ++++ cat /tmp/tmp.SzJkWpmSpc ++++ rm /tmp/tmp.Kpwp0Bukmg /tmp/tmp.SzJkWpmSpc ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aIYH8e5HFd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gaUhYpxSV0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.aIYH8e5HFd +++ cat /tmp/tmp.gaUhYpxSV0 +++ rm /tmp/tmp.aIYH8e5HFd /tmp/tmp.gaUhYpxSV0 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xZx2Fjky2p +++ mktemp ++ local LAST_ERR=/tmp/tmp.TgMY3aDPVV ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.xZx2Fjky2p ++ cat /tmp/tmp.TgMY3aDPVV ++ rm /tmp/tmp.xZx2Fjky2p /tmp/tmp.TgMY3aDPVV ++ return 0 + local client_container=pg-client-76df9df876-jdws6 + kubectl_bin exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ 
GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.UrCsRrtVHn ++ mktemp + local LAST_ERR=/tmp/tmp.e6DooMJcGL + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-jdws6 -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.UrCsRrtVHn + cat /tmp/tmp.e6DooMJcGL + rm /tmp/tmp.UrCsRrtVHn /tmp/tmp.e6DooMJcGL + return 0 + sleep 10 + destroy_operator + kubectl_bin -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.MjAmvpFjqI ++ mktemp + local LAST_ERR=/tmp/tmp.Lh1akwzOvK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.MjAmvpFjqI + cat /tmp/tmp.Lh1akwzOvK Warning: deleting cluster-scoped resources, not scoped to the provided namespace Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.MjAmvpFjqI /tmp/tmp.Lh1akwzOvK + return 1 + true + yq w -d4 /tmp/tmp.QW8obiMqqX/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin -n ns-mode-21097 apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YAMXhPxIq0 ++ mktemp + local LAST_ERR=/tmp/tmp.YV0ekAgZyB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YAMXhPxIq0 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.YV0ekAgZyB + rm /tmp/tmp.YAMXhPxIq0 /tmp/tmp.YV0ekAgZyB + return 0 + wait_job_completion 
pgo-deploy true ns-mode-21097 + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-21097 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
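
Nearly every command in this log runs through the same retry wrapper: two mktemp files for stdout/stderr, up to three attempts, then the captured output is printed and the status returned. The failed delete attempts above that pause 0, 4 and 8 seconds between tries show the pattern clearly. A minimal sketch of that kubectl_bin helper, reconstructed only from the trace (the sleep arithmetic and the output redirection are assumptions):

# sketch only -- reconstructed from the xtrace output, not the suite's source
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        # the real helper pins KUBECONFIG to the per-job sandbox file seen in the trace
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        [[ ${exit_status} != 0 ]] || break      # stop on the first successful attempt
        sleep $((timeout * i))                  # 0s, 4s, 8s between retries (as logged)
    done
    set -o errexit
    cat "${LAST_OUT}"
    cat "${LAST_ERR}"
    rm "${LAST_OUT}" "${LAST_ERR}"
    return ${exit_status}
}

When all three attempts fail the wrapper returns non-zero, which is why the cleanup calls above are followed by "+ true": the NotFound errors are expected and must not trip errexit.
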
.+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-21097 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4PWSm9xhho ++ mktemp + local LAST_ERR=/tmp/tmp.G9oJZ7ceFf + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-21097 delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4PWSm9xhho serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.G9oJZ7ceFf Warning: deleting cluster-scoped resources, not scoped to the provided namespace + rm /tmp/tmp.4PWSm9xhho /tmp/tmp.G9oJZ7ceFf + return 0 + destroy_namespace ns-mode-21097 + local namespace=ns-mode-21097 + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete --grace-period=0 --force=true namespace ns-mode-21097 ++ mktemp + local LAST_OUT=/tmp/tmp.9DxQNzDfd6 ++ mktemp + local LAST_ERR=/tmp/tmp.nyRPWYh51X + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete --grace-period=0 --force=true namespace ns-mode-21097 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9DxQNzDfd6 namespace "ns-mode-21097" force deleted + cat /tmp/tmp.nyRPWYh51X Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. + rm /tmp/tmp.9DxQNzDfd6 /tmp/tmp.nyRPWYh51X + return 0 + unset OPERATOR_NS + desc 'readonly operator mode. Should only watch namespace for cluster. No namespace creation or removal' + set +o xtrace ----------------------------------------------------------------------------------- readonly operator mode. Should only watch namespace for cluster. 
No namespace creation or removal ----------------------------------------------------------------------------------- + create_namespace ns-mode-4621 + local namespace=ns-mode-4621 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + cleanup_rbac ++ mktemp + xargs kubectl delete ns + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.IdAGddOatX + local LAST_OUT=/tmp/tmp.nswXtyJNfP ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.gEZZh5tdCn + local exit_status=0 + local timeout=4 + set +o errexit + local LAST_ERR=/tmp/tmp.2jsSaOpav3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IdAGddOatX + cat /tmp/tmp.gEZZh5tdCn + rm /tmp/tmp.IdAGddOatX /tmp/tmp.gEZZh5tdCn + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level namespace "ns-mode-4621" deleted + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.nswXtyJNfP + cat /tmp/tmp.2jsSaOpav3 Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.nswXtyJNfP /tmp/tmp.2jsSaOpav3 + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.Nj83g1LzxR ++ mktemp + local LAST_ERR=/tmp/tmp.r4Dv1VGBSF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.Nj83g1LzxR + cat /tmp/tmp.r4Dv1VGBSF Error from server 
(NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.Nj83g1LzxR /tmp/tmp.r4Dv1VGBSF + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.FlrJp0AqnN ++ mktemp + local LAST_ERR=/tmp/tmp.YpPQCpHXeK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.FlrJp0AqnN namespace "ns-mode-4621" deleted + cat /tmp/tmp.YpPQCpHXeK + rm /tmp/tmp.FlrJp0AqnN /tmp/tmp.YpPQCpHXeK + return 0 + wait_for_delete namespace/ns-mode-4621 + local res=namespace/ns-mode-4621 + set +o xtrace namespace/ns-mode-4621 - Error from server (NotFound): namespaces "ns-mode-4621" not found + kubectl_bin create namespace ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.fmlc7B5DBy ++ mktemp + local LAST_ERR=/tmp/tmp.QNXO3ZuOAc + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.fmlc7B5DBy namespace/ns-mode-4621 created + cat /tmp/tmp.QNXO3ZuOAc + rm /tmp/tmp.fmlc7B5DBy /tmp/tmp.QNXO3ZuOAc + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BQi7cmLH1C +++ mktemp ++ local LAST_ERR=/tmp/tmp.ixOKN4swxc ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.BQi7cmLH1C ++ cat /tmp/tmp.ixOKN4swxc ++ rm /tmp/tmp.BQi7cmLH1C /tmp/tmp.ixOKN4swxc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.b68DWJFud5 ++ mktemp + local LAST_ERR=/tmp/tmp.vJq2saB69c + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.b68DWJFud5 Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. 
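
The block above is the create_namespace path for the readonly-mode case: leftover non-system namespaces and cluster-scoped RBAC from earlier runs are deleted (the NotFound errors are tolerated), then ns-mode-4621 is recreated and the current kubeconfig context is pointed at it. A rough sketch with the retry wrapper elided; helper names and resource lists are taken from the trace, everything else is an assumption:

# sketch only -- condensed from the traced commands above
create_namespace() {
    local namespace=$1

    # wipe every non-system namespace and the leftover cluster-scoped objects
    kubectl get ns \
        | egrep -v '^kube-|^default|Terminating|openshift|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns
    kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr \
        chaos-mesh-chaos-controller-manager-cluster-level || true
    kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr \
        chaos-mesh-chaos-controller-manager-target-namespace \
        chaos-mesh-chaos-controller-manager-cluster-level || true
    kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation \
        ValidatingWebhookConfiguration/chaos-mesh-validation \
        ValidatingWebhookConfiguration/validate-auth || true

    # recreate the test namespace and switch the current context to it
    kubectl delete namespace "${namespace}" || true
    kubectl create namespace "${namespace}"
    kubectl config set-context "$(kubectl config current-context)" --namespace="${namespace}"
}
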
+ cat /tmp/tmp.vJq2saB69c + rm /tmp/tmp.b68DWJFud5 /tmp/tmp.vJq2saB69c + return 0 + deploy_operator '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install ns-mode-4621 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=ns-mode-4621 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "ns-mode-4621"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "ns-mode-4621"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f1 ++ cut -d: -f2 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace ns-mode-4621 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w -d3 - 'subjects[0].namespace' ns-mode-4621 ++ cat /tmp/tmp.QW8obiMqqX/operator.ini + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "ns-mode-4621" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "ns-mode-4621"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.NGnk1Ab3EI ++ mktemp + local LAST_ERR=/tmp/tmp.ZNGHClMi1j + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.NGnk1Ab3EI serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat 
/tmp/tmp.ZNGHClMi1j + rm /tmp/tmp.NGnk1Ab3EI /tmp/tmp.ZNGHClMi1j + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
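
The yq/sed pipeline traced just above is prepare_operator_yaml: the embedded values.yaml of deploy/operator.yaml is rewritten for this namespace and namespace_mode, the deployer image and subjects are retargeted, and the result is applied so the pgo-deploy job can install the operator. A condensed sketch under those assumptions (yq v3 r/w syntax as used in the trace; the /tmp directory comes from an earlier mktemp):

# sketch only -- a subset of the edits visible in the trace
prepare_and_deploy_operator() {
    local manifest=$1 namespace=$2 namespace_mode=$3 tmp_dir=$4

    # rewrite the embedded values.yaml (document 2 of the deployer manifest)
    yq r -d2 "${manifest}" 'data[values.yaml]' \
        | sed -e "s#^namespace: .*#namespace: \"${namespace}\"#g" \
              -e "s#pgo_operator_namespace: .*#pgo_operator_namespace: \"${namespace}\"#g" \
              -e "s#namespace_mode: .*#namespace_mode: \"${namespace_mode}\"#g" \
        > "${tmp_dir}/operator.ini"

    # retarget every document at the test namespace and set the deployer action
    yq w -d'*' "${manifest}" metadata.namespace "${namespace}" \
        | yq w -d3 - 'subjects[0].namespace' "${namespace}" \
        | yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install \
        | yq w -d2 - 'data[values.yaml]' "$(cat "${tmp_dir}/operator.ini")" \
        > "${tmp_dir}/operator.yaml"

    kubectl apply -f "${tmp_dir}/operator.yaml"
    # the pgo-deploy job then performs the installation; once it succeeds the
    # deployer objects are deleted again (kubectl delete -f operator.yaml)
}

The dotted polling that resumes below is the wait on job/pgo-deploy; see the wait_job_completion sketch further down.
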
.+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
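
The repeated get-job/check-succeeded cycle around this point is wait_job_completion: it polls job/pgo-deploy once per second until .status.succeeded reports 1 (here it takes about 35 iterations, well under the 600-iteration cap checked by "-ge 600"). A minimal sketch of the loop as it appears in the trace, with the failure paths filled in as assumptions:

# sketch only -- loop body copied from the trace, error handling assumed
wait_job_completion() {
    local job=$1 mandatory=${2:-true} ns=$3   # the real helper defaults ns to the test namespace
    local retry=0

    until [[ $(kubectl -n "${ns}" get "job/${job}" -o 'jsonpath={.status.succeeded}' 2>/dev/null) == "1" ]]; do
        # a vanished job is fatal only when the caller marked it mandatory
        if ! kubectl -n "${ns}" get "job/${job}" >/dev/null 2>&1; then
            [[ ${mandatory} == "true" ]] && return 1
            return 0
        fi
        sleep 1
        echo -n .
        let retry+=1
        if [[ ${retry} -ge 600 ]]; then
            echo "job/${job} did not complete in time"
            return 1
        fi
    done
    echo
}
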
.+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 35 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Camue8U8VA ++ mktemp + local LAST_ERR=/tmp/tmp.rHhNebBon2 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Camue8U8VA serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.rHhNebBon2 + rm /tmp/tmp.Camue8U8VA /tmp/tmp.rHhNebBon2 + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MFvXAYwtM8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.p5Waawz0L3 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.MFvXAYwtM8 ++ cat /tmp/tmp.p5Waawz0L3 ++ rm /tmp/tmp.MFvXAYwtM8 /tmp/tmp.p5Waawz0L3 ++ return 0 + wait_pod postgres-operator-858fb9644-wwgfm + local pod=postgres-operator-858fb9644-wwgfm + local ns= + set +o xtrace postgres-operator-858fb9644-wwgfmtrue + [[ -n '' ]] + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=ns-mode-4621 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name ns-mode-4621 + local cluster=some-name + local 
namespace=ns-mode-4621 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + yq w - metadata.name some-name-users-secret + kubectl_bin -n ns-mode-4621 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5WbiCPglbW ++ mktemp + local LAST_ERR=/tmp/tmp.JxXL9wLSsn + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.5WbiCPglbW secret/some-name-users-secret created + cat /tmp/tmp.JxXL9wLSsn + rm /tmp/tmp.5WbiCPglbW /tmp/tmp.JxXL9wLSsn + return 0 + yq w - metadata.name some-name-pmm-secret + kubectl_bin -n ns-mode-4621 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.532E5euryp ++ mktemp + local LAST_ERR=/tmp/tmp.xitlV2swvV + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.532E5euryp secret/some-name-pmm-secret created + cat /tmp/tmp.xitlV2swvV + rm /tmp/tmp.532E5euryp /tmp/tmp.xitlV2swvV + return 0 + kubectl_bin -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.xGdEwpdiUC ++ mktemp + local LAST_ERR=/tmp/tmp.0YW5fX7fPZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.xGdEwpdiUC deployment.apps/pg-client created + cat /tmp/tmp.0YW5fX7fPZ + rm /tmp/tmp.xGdEwpdiUC /tmp/tmp.0YW5fX7fPZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.labels.name some-name + yq w - spec.database some-name + yq w - metadata.name some-name + yq w - spec.clustername some-name + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - metadata.labels.pg-cluster some-name + yq w - spec.keepBackups false + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.name some-name + yq w - metadata.labels.deployment-name some-name + yq w - spec.keepData false + yq w - spec.user some-name + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.secretsName some-name-users-secret + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgPrimary.image 
perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 ++ mktemp + local LAST_OUT=/tmp/tmp.lmz7rjwRvE ++ mktemp + local LAST_ERR=/tmp/tmp.VubYA20sfo + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lmz7rjwRvE perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.VubYA20sfo + rm /tmp/tmp.lmz7rjwRvE /tmp/tmp.VubYA20sfo + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name-backrest-shared-repo... + wait_deployment some-name + local name=some-name + local target_namespace=ns-mode-4621 + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
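
Once the operator pod is ready, spinup_pgcluster drives the rest of this block: per-cluster secrets and the pg-client helper pod are created, deploy/cr.yaml is rewritten with yq for this cluster name and image set, and the usual deployments and jobs are awaited. A compressed sketch; helper names and yq paths come from the trace, the rest is an assumption:

# sketch only -- a subset of the yq edits and waits traced above
spinup_pgcluster() {
    local cluster=$1 cr=$2 namespace=$3 tmp_dir=$4

    # users secret generated from the first document of deploy/users-secret.yaml
    yq r -d0 deploy/users-secret.yaml \
        | yq w - metadata.name "${cluster}-users-secret" \
        | kubectl -n "${namespace}" apply -f -

    # throwaway psql client deployment used by run_psql
    kubectl -n "${namespace}" apply -f e2e-tests/conf/client.yml

    # retarget the CR at this cluster name and the images under test
    yq w "${cr}" metadata.name "${cluster}" \
        | yq w - spec.clustername "${cluster}" \
        | yq w - spec.secretsName "${cluster}-users-secret" \
        | yq w - spec.keepData false \
        > "${tmp_dir}/cr.yaml"
    kubectl -n "${namespace}" apply -f "${tmp_dir}/cr.yaml"

    # then: wait for the backrest-shared-repo and primary deployments, the
    # stanza-create and backrest-backup jobs, and 'pgcluster Initialized'
}

The polling that continues below is for the optional some-name-stanza-create job (mandatory=false, so a missing job is tolerated), followed by the backrest-backup-some-name job and the cluster-status wait.
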
.+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=ns-mode-4621 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=ns-mode-4621 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7isik9RQ1s +++ mktemp ++ local LAST_ERR=/tmp/tmp.vLjUpTfSs8 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.7isik9RQ1s ++ cat /tmp/tmp.vLjUpTfSs8 ++ rm /tmp/tmp.7isik9RQ1s /tmp/tmp.vLjUpTfSs8 ++ return 0 + status_query='kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tQbjdD17ha ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZGhlQw3gZL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tQbjdD17ha +++ cat /tmp/tmp.ZGhlQw3gZL +++ rm /tmp/tmp.tQbjdD17ha /tmp/tmp.ZGhlQw3gZL +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tVFp4GDDhj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yG1qqyG6ef +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tVFp4GDDhj +++ cat /tmp/tmp.yG1qqyG6ef +++ rm /tmp/tmp.tVFp4GDDhj 
/tmp/tmp.yG1qqyG6ef +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BNVJ8rMXa7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bMRY1VtBRf +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.BNVJ8rMXa7 +++ cat /tmp/tmp.bMRY1VtBRf +++ rm /tmp/tmp.BNVJ8rMXa7 /tmp/tmp.bMRY1VtBRf +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GbgY53mFl8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nQIkGMD5Qr +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GbgY53mFl8 +++ cat /tmp/tmp.nQIkGMD5Qr +++ rm /tmp/tmp.GbgY53mFl8 /tmp/tmp.nQIkGMD5Qr +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QhClikOdoU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NsMrarcX6m +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QhClikOdoU +++ cat /tmp/tmp.NsMrarcX6m +++ rm /tmp/tmp.QhClikOdoU /tmp/tmp.NsMrarcX6m +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LSO2UEoU9o +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ZlhnPMuqgR ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.LSO2UEoU9o ++++ cat /tmp/tmp.ZlhnPMuqgR ++++ rm /tmp/tmp.LSO2UEoU9o /tmp/tmp.ZlhnPMuqgR ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T0vCt9hkgY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jCYYWnmZ91 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get 
secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.T0vCt9hkgY +++ cat /tmp/tmp.jCYYWnmZ91 +++ rm /tmp/tmp.T0vCt9hkgY /tmp/tmp.jCYYWnmZ91 +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mLe9PW7tqE +++ mktemp ++ local LAST_ERR=/tmp/tmp.rvgIG4n4kl ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.mLe9PW7tqE ++ cat /tmp/tmp.rvgIG4n4kl ++ rm /tmp/tmp.mLe9PW7tqE /tmp/tmp.rvgIG4n4kl ++ return 0 + local client_container=pg-client-76df9df876-xldbx + kubectl_bin exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.WhGDyS2cku ++ mktemp + local LAST_ERR=/tmp/tmp.gjJSfjcUGK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WhGDyS2cku + cat /tmp/tmp.gjJSfjcUGK + rm /tmp/tmp.WhGDyS2cku /tmp/tmp.gjJSfjcUGK + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tMcgDMccwu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VFV8D7NGpl +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tMcgDMccwu +++ cat /tmp/tmp.VFV8D7NGpl +++ rm /tmp/tmp.tMcgDMccwu /tmp/tmp.VFV8D7NGpl +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UgNZYFvlK2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9DfXVeZMqJ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get 
perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UgNZYFvlK2 +++ cat /tmp/tmp.9DfXVeZMqJ +++ rm /tmp/tmp.UgNZYFvlK2 /tmp/tmp.9DfXVeZMqJ +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KHzMZFBf0A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.U6ouD0JC6L +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.KHzMZFBf0A +++ cat /tmp/tmp.U6ouD0JC6L +++ rm /tmp/tmp.KHzMZFBf0A /tmp/tmp.U6ouD0JC6L +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.S4MiwEP3zc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UXeR0cA26l ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.S4MiwEP3zc ++++ cat /tmp/tmp.UXeR0cA26l ++++ rm /tmp/tmp.S4MiwEP3zc /tmp/tmp.UXeR0cA26l ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AInz1zn9lu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cfzEd7gxmP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.AInz1zn9lu +++ cat /tmp/tmp.cfzEd7gxmP +++ rm /tmp/tmp.AInz1zn9lu /tmp/tmp.cfzEd7gxmP +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lv3vTInjiV +++ mktemp ++ local LAST_ERR=/tmp/tmp.8mfDjSqOWm ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Lv3vTInjiV ++ cat /tmp/tmp.8mfDjSqOWm ++ rm /tmp/tmp.Lv3vTInjiV /tmp/tmp.8mfDjSqOWm ++ return 0 + local client_container=pg-client-76df9df876-xldbx + kubectl_bin exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.XnFrTrlpmo ++ mktemp + local LAST_ERR=/tmp/tmp.JlI04cZ4CQ 
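
Every SQL statement in this log is issued through the same pair of helpers: get_psql_user_pass resolves the password from the cluster's users secret (the base64 lookup traced just above), and run_psql pipes the statement into psql inside the pg-client pod. A minimal sketch, assuming the signatures visible in the trace:

# sketch only -- flags and jsonpaths copied from the trace, structure assumed
get_psql_user_pass() {
    local user=$1 cluster=$2 namespace=$3
    local secret
    secret=$(kubectl -n "${namespace}" get "perconapgcluster/${cluster}" -o 'jsonpath={.spec.secretsName}')
    kubectl -n "${namespace}" get "secret/${secret}" -o "jsonpath={.data.${user}}" | base64 -d
}

run_psql() {
    local command=$1 uri=$2 namespace=$3
    local suffix=.svc.cluster.local client
    client=$(kubectl -n "${namespace}" get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "${client}" -n "${namespace}" -- bash -c \
        "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q postgres://${uri}${suffix}"
}

Under these assumptions the GRANT below amounts to run_psql '<grant statement>' "postgres:$(get_psql_user_pass postgres some-name ns-mode-4621)@some-name.ns-mode-4621" ns-mode-4621.
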
+ local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XnFrTrlpmo + cat /tmp/tmp.JlI04cZ4CQ + rm /tmp/tmp.XnFrTrlpmo /tmp/tmp.JlI04cZ4CQ + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fAqyPkvxNQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zXVzpph3pi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fAqyPkvxNQ +++ cat /tmp/tmp.zXVzpph3pi +++ rm /tmp/tmp.fAqyPkvxNQ /tmp/tmp.zXVzpph3pi +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZmrcRX5V10 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AZrxqCBBos +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ZmrcRX5V10 +++ cat /tmp/tmp.AZrxqCBBos +++ rm /tmp/tmp.ZmrcRX5V10 /tmp/tmp.AZrxqCBBos +++ return 0 +++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kzJBbKE9KI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8QHebNuX18 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kzJBbKE9KI +++ cat /tmp/tmp.8QHebNuX18 +++ rm /tmp/tmp.kzJBbKE9KI /tmp/tmp.8QHebNuX18 +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XnWbXkvAsP +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.rVhTCMWwbp ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.XnWbXkvAsP ++++ cat /tmp/tmp.rVhTCMWwbp ++++ rm /tmp/tmp.XnWbXkvAsP /tmp/tmp.rVhTCMWwbp ++++ return 0 +++ kubectl_bin -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RVYyC9BLcW ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.2ryAcYzGIH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.RVYyC9BLcW +++ cat /tmp/tmp.2ryAcYzGIH +++ rm /tmp/tmp.RVYyC9BLcW /tmp/tmp.2ryAcYzGIH +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.ns-mode-4621 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.ns-mode-4621 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.scLmh9c9mq +++ mktemp ++ local LAST_ERR=/tmp/tmp.gA99RrlN1m ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.scLmh9c9mq ++ cat /tmp/tmp.gA99RrlN1m ++ rm /tmp/tmp.scLmh9c9mq /tmp/tmp.gA99RrlN1m ++ return 0 + local client_container=pg-client-76df9df876-xldbx + kubectl_bin exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.ptMN2pKsva ++ mktemp + local LAST_ERR=/tmp/tmp.gIkiEFoYjQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-xldbx -n ns-mode-4621 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ptMN2pKsva + cat /tmp/tmp.gIkiEFoYjQ + rm /tmp/tmp.ptMN2pKsva /tmp/tmp.gIkiEFoYjQ + return 0 + sleep 10 + create_namespace ns-mode-4621-disabled true + local namespace=ns-mode-4621-disabled + local skip_clean_namespace=true + [[ 1 == 1 ]] + [[ -z true ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace ns-mode-4621-disabled ++ mktemp + local LAST_OUT=/tmp/tmp.EqAmiWO0vo ++ mktemp + local LAST_ERR=/tmp/tmp.OTyI7Yfana + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621-disabled + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621-disabled + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace ns-mode-4621-disabled + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.EqAmiWO0vo + cat /tmp/tmp.OTyI7Yfana Error from server 
(NotFound): namespaces "ns-mode-4621-disabled" not found + rm /tmp/tmp.EqAmiWO0vo /tmp/tmp.OTyI7Yfana + return 1 + : + wait_for_delete namespace/ns-mode-4621-disabled + local res=namespace/ns-mode-4621-disabled + set +o xtrace namespace/ns-mode-4621-disabled - Error from server (NotFound): namespaces "ns-mode-4621-disabled" not found + kubectl_bin create namespace ns-mode-4621-disabled ++ mktemp + local LAST_OUT=/tmp/tmp.0eHe1E3Q2f ++ mktemp + local LAST_ERR=/tmp/tmp.lQaJQPkOa9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace ns-mode-4621-disabled + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0eHe1E3Q2f namespace/ns-mode-4621-disabled created + cat /tmp/tmp.lQaJQPkOa9 + rm /tmp/tmp.0eHe1E3Q2f /tmp/tmp.lQaJQPkOa9 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.UZc1yaXukh +++ mktemp ++ local LAST_ERR=/tmp/tmp.oy2Rl4dM1w ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.UZc1yaXukh ++ cat /tmp/tmp.oy2Rl4dM1w ++ rm /tmp/tmp.UZc1yaXukh /tmp/tmp.oy2Rl4dM1w ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621-disabled ++ mktemp + local LAST_OUT=/tmp/tmp.E9EwtaP2lK ++ mktemp + local LAST_ERR=/tmp/tmp.E3OaVFJIFQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=ns-mode-4621-disabled + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.E9EwtaP2lK Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. 
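Note on the run_psql helper seen earlier in this trace: the INSERT and GRANT statements are not issued from the Jenkins node. The helper looks up the pg-client pod by its name=pg-client label and pipes the SQL into psql inside that pod over a cluster-local connection URI. A minimal sketch reconstructed from the trace (the explicit namespace argument and the simplified quoting are assumptions of this sketch; the real helper is more defensive):

    run_psql() {
        local command="$1"                    # e.g. '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)'
        local uri="$2"                        # user:password@service.namespace
        local namespace="$3"                  # assumed extra argument for this sketch
        local driver=postgres
        local suffix=.svc.cluster.local

        # the client pod is deployed from e2e-tests/conf/client.yml earlier in the run
        local client_container
        client_container=$(kubectl -n "$namespace" get pods --selector=name=pg-client \
            -o 'jsonpath={.items[].metadata.name}')

        # ON_ERROR_STOP=1 turns any SQL error into a non-zero exit, failing the test step
        kubectl exec "$client_container" -n "$namespace" -- bash -c \
            "printf '$command\n' | psql -v ON_ERROR_STOP=1 -t -q $driver://$uri$suffix"
    }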
+ cat /tmp/tmp.E3OaVFJIFQ + rm /tmp/tmp.E9EwtaP2lK /tmp/tmp.E3OaVFJIFQ + return 0 + export OPERATOR_NS=ns-mode-4621-disabled + OPERATOR_NS=ns-mode-4621-disabled + deploy_operator '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled '' ns-mode-4621-disabled + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=ns-mode-4621-disabled + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install ns-mode-4621-disabled false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=ns-mode-4621-disabled + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "ns-mode-4621-disabled"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "ns-mode-4621-disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ echo 'crunchy_debug: "true"' ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w -d3 - 'subjects[0].namespace' ns-mode-4621-disabled + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace ns-mode-4621-disabled ++ cat /tmp/tmp.QW8obiMqqX/operator.ini + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "ns-mode-4621-disabled" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "ns-mode-4621-disabled"' + [[ -n '' ]] + kubectl_bin apply -n ns-mode-4621-disabled -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.V45fWipaSC ++ mktemp + local LAST_ERR=/tmp/tmp.6tP2o6O3Fz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -n ns-mode-4621-disabled -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.V45fWipaSC 
serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.6tP2o6O3Fz + rm /tmp/tmp.V45fWipaSC /tmp/tmp.6tP2o6O3Fz + return 0 + wait_job_completion pgo-deploy true ns-mode-4621-disabled + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621-disabled + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
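Almost every kubectl call in this log is wrapped by kubectl_bin: stdout and stderr are captured into mktemp files, errexit is switched off, the command is attempted up to three times with sleeps of 0, 4 and 8 seconds, and the captured output is printed and cleaned up afterwards. Callers that tolerate failure follow it with ':' or 'true', which is why the NotFound errors above do not abort the test. A minimal sketch of the pattern as it appears in the trace:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        set +o errexit
        for i in $(seq 0 2); do
            KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox \
                kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            [[ $exit_status == 0 ]] && break
            sleep $((timeout * i))            # 0s, then 4s, then 8s between attempts
        done
        set -o errexit
        cat "$LAST_OUT"                       # surface captured stdout
        cat "$LAST_ERR" >&2                   # surface captured stderr
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }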
.+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
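The long run of dots around this point is wait_job_completion polling the pgo-deploy job once per second: each pass first confirms the job still exists (the name query is grepped for 'not found'), then tests whether .status.succeeded has reached 1, and gives up after 600 attempts. Reconstructed from the trace, with the mandatory flag and error reporting trimmed for brevity:

    wait_job_completion() {
        local job="$1"
        local ns="$2"
        local retry=0
        while true; do
            # stop early if the job was deleted instead of completing
            if kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' 2>&1 \
                    | grep -qio 'not found'; then
                return 1
            fi
            # succeeded flips to 1 once the job's pod finishes successfully
            if [[ $(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}') == "1" ]]; then
                echo
                return 0
            fi
            sleep 1
            echo -n .
            retry=$((retry + 1))
            if [ "$retry" -ge 600 ]; then
                return 1
            fi
        done
    }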
.+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -n ns-mode-4621-disabled -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.wzqYuAPiGr ++ mktemp + local LAST_ERR=/tmp/tmp.btL1B6oRjg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -n ns-mode-4621-disabled -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wzqYuAPiGr serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.btL1B6oRjg Warning: deleting cluster-scoped resources, not scoped to the provided namespace + rm /tmp/tmp.wzqYuAPiGr /tmp/tmp.btL1B6oRjg + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lLJ9cnSjMl +++ mktemp ++ local LAST_ERR=/tmp/tmp.MFmnIV2ncF ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.lLJ9cnSjMl ++ cat /tmp/tmp.MFmnIV2ncF ++ rm /tmp/tmp.lLJ9cnSjMl /tmp/tmp.MFmnIV2ncF ++ return 0 + wait_pod postgres-operator-5bf675dd87-9kvb8 ns-mode-4621-disabled + local pod=postgres-operator-5bf675dd87-9kvb8 + local ns=ns-mode-4621-disabled + set +o xtrace postgres-operator-5bf675dd87-9kvb8true + [[ -n '' ]] + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml '' '' '' '' '' '' '' ns-mode-4621-disabled + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=ns-mode-4621-disabled + local root_via_pgbouncer= + desc 'create fresh 
PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name ns-mode-4621-disabled + local cluster=some-name + local namespace=ns-mode-4621-disabled + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n ns-mode-4621-disabled apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.S9cCHE3AmB ++ mktemp + local LAST_ERR=/tmp/tmp.BoniTSxlzJ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.S9cCHE3AmB secret/some-name-users-secret created + cat /tmp/tmp.BoniTSxlzJ + rm /tmp/tmp.S9cCHE3AmB /tmp/tmp.BoniTSxlzJ + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - metadata.name some-name-pmm-secret + kubectl_bin -n ns-mode-4621-disabled apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OzDiT2H6QI ++ mktemp + local LAST_ERR=/tmp/tmp.NFFUrRsxdx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.OzDiT2H6QI secret/some-name-pmm-secret created + cat /tmp/tmp.NFFUrRsxdx + rm /tmp/tmp.OzDiT2H6QI /tmp/tmp.NFFUrRsxdx + return 0 + kubectl_bin -n ns-mode-4621-disabled apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Z3RISFgyt3 ++ mktemp + local LAST_ERR=/tmp/tmp.boK13duriY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Z3RISFgyt3 deployment.apps/pg-client created + cat /tmp/tmp.boK13duriY + rm /tmp/tmp.Z3RISFgyt3 /tmp/tmp.boK13duriY + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.deployment-name some-name + yq w - metadata.labels.name some-name + yq w - spec.database some-name + yq w - spec.keepBackups false + yq w - metadata.name some-name + yq w - spec.clustername some-name + yq w - spec.user some-name + yq w - spec.name some-name + yq w - spec.secretsName some-name-users-secret + yq w - spec.keepData false + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - metadata.labels.pg-cluster some-name + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger 
+ yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - metadata.labels.crunchy-pgha-scope some-name + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621-disabled ++ mktemp + local LAST_OUT=/tmp/tmp.bK5677wLKE ++ mktemp + local LAST_ERR=/tmp/tmp.IXRO65447R + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.QW8obiMqqX/cr.yaml -n ns-mode-4621-disabled + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.bK5677wLKE perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.IXRO65447R + rm /tmp/tmp.bK5677wLKE /tmp/tmp.IXRO65447R + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=ns-mode-4621-disabled + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=ns-mode-4621-disabled + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=ns-mode-4621-disabled + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=ns-mode-4621-disabled + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
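The apply_cluster step above never edits deploy/cr.yaml on disk: the manifest is streamed through a chain of yq v3 writes that set the cluster name, labels, secretsName, keepData/keepBackups and the per-component image overrides, and the rendered copy is applied from the run's temp directory. A condensed sketch of the same idea (field names and image tags are the ones shown in the trace; the temp path is illustrative):

    # render a per-test copy of the CR, then apply it into the test namespace
    tmp_dir=$(mktemp -d)
    yq w deploy/cr.yaml metadata.name some-name \
        | yq w - metadata.labels.pg-cluster some-name \
        | yq w - spec.clustername some-name \
        | yq w - spec.secretsName some-name-users-secret \
        | yq w - spec.keepData false \
        | yq w - spec.keepBackups false \
        | yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha \
        | yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest \
        | yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer \
        > "$tmp_dir/cr.yaml"
    kubectl apply -f "$tmp_dir/cr.yaml" -n ns-mode-4621-disabled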
.+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
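Both for the first cluster earlier in the log and for the write-data steps further below, the suite never hard-codes the superuser password: get_psql_user_pass resolves spec.secretsName from the perconapgcluster resource and base64-decodes the postgres key of that secret. Stripped to its essentials (the ns variable here is just shorthand for whichever test namespace is in play):

    ns=ns-mode-4621-disabled                  # or ns-mode-4621 for the first cluster
    # the CR points at the users secret created by create_user_secrets
    secrets_name=$(kubectl -n "$ns" get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}')
    # secret data is base64-encoded; decode the postgres user's password
    pg_pass=$(kubectl -n "$ns" get "secret/$secrets_name" -o 'jsonpath={.data.postgres}' | base64 -d)
    echo "$pg_pass"                           # prints postgres_password in this run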
.+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
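Once the backup job below completes, wait_cluster_consistency builds a status query against the CR and re-runs it every 10 seconds until .status.PGCluster.state reports 'pgcluster Initialized', giving up after 24 attempts. A stripped-down version of that loop, following the trace:

    wait_cluster_status() {
        local cluster="$1" target="$2" ns="$3"
        local retry=0
        until [[ $(kubectl -n "$ns" get "perconapgcluster/$cluster" \
                       -o 'jsonpath={.status.PGCluster.state}') == "$target" ]]; do
            retry=$((retry + 1))
            if [ "$retry" -ge 24 ]; then
                echo "cluster never reached '$target'" >&2
                return 1
            fi
            echo 'waiting for cluster readiness'
            sleep 10
        done
    }
    # in this test: wait_cluster_status some-name 'pgcluster Initialized' ns-mode-4621-disabled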
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=ns-mode-4621-disabled + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3SthoQZwwZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.w8b1Oy1ysW ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.3SthoQZwwZ ++ cat /tmp/tmp.w8b1Oy1ysW ++ rm /tmp/tmp.3SthoQZwwZ /tmp/tmp.w8b1Oy1ysW ++ return 0 + status_query='kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x97fJ98W2Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gH8WLXVKlt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.x97fJ98W2Y +++ cat /tmp/tmp.gH8WLXVKlt +++ rm /tmp/tmp.x97fJ98W2Y /tmp/tmp.gH8WLXVKlt +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.I8SksZ9BO0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F2RsmEfKrt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.I8SksZ9BO0 +++ cat /tmp/tmp.F2RsmEfKrt +++ rm /tmp/tmp.I8SksZ9BO0 /tmp/tmp.F2RsmEfKrt +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres 
++ local cluster=some-name ++ local namespace=ns-mode-4621-disabled +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UIEwFBckLz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dNZkBHo2XK +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UIEwFBckLz +++ cat /tmp/tmp.dNZkBHo2XK +++ rm /tmp/tmp.UIEwFBckLz /tmp/tmp.dNZkBHo2XK +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B6Kt6Stau1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.L6wFSx84bb +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.B6Kt6Stau1 +++ cat /tmp/tmp.L6wFSx84bb +++ rm /tmp/tmp.B6Kt6Stau1 /tmp/tmp.L6wFSx84bb +++ return 0 +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9RG85tpHNC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QxMe1bV6HZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9RG85tpHNC +++ cat /tmp/tmp.QxMe1bV6HZ +++ rm /tmp/tmp.9RG85tpHNC /tmp/tmp.QxMe1bV6HZ +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vNiJp68YOv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.aqcvLokh1M ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.vNiJp68YOv ++++ cat /tmp/tmp.aqcvLokh1M ++++ rm /tmp/tmp.vNiJp68YOv /tmp/tmp.aqcvLokh1M ++++ return 0 +++ kubectl_bin -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B9GFmCmjYt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SbI5Prtb4S +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.B9GFmCmjYt +++ cat /tmp/tmp.SbI5Prtb4S +++ rm /tmp/tmp.B9GFmCmjYt /tmp/tmp.SbI5Prtb4S +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT 
EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.ns-mode-4621-disabled + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.ns-mode-4621-disabled + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.049fERtxwE +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKH6X0SKKF ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.049fERtxwE ++ cat /tmp/tmp.eKH6X0SKKF ++ rm /tmp/tmp.049fERtxwE /tmp/tmp.eKH6X0SKKF ++ return 0 + local client_container=pg-client-76df9df876-7mk88 + kubectl_bin exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.p7FzSiCsAp ++ mktemp + local LAST_ERR=/tmp/tmp.TUUC4E9wHG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.p7FzSiCsAp + cat /tmp/tmp.TUUC4E9wHG + rm /tmp/tmp.p7FzSiCsAp /tmp/tmp.TUUC4E9wHG + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621-disabled +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XmhGfbfLZu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7B9jeYGXLN +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.XmhGfbfLZu +++ cat /tmp/tmp.7B9jeYGXLN +++ rm /tmp/tmp.XmhGfbfLZu /tmp/tmp.7B9jeYGXLN +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8aylDhOEst ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JAHcUkZ7ML +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8aylDhOEst +++ cat /tmp/tmp.JAHcUkZ7ML +++ rm /tmp/tmp.8aylDhOEst /tmp/tmp.JAHcUkZ7ML +++ 
return 0 +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ILOXM3xKLx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.agaIuZA0aA +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ILOXM3xKLx +++ cat /tmp/tmp.agaIuZA0aA +++ rm /tmp/tmp.ILOXM3xKLx /tmp/tmp.agaIuZA0aA +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xcc15qCvzr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.urCEMAtzJl ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.xcc15qCvzr ++++ cat /tmp/tmp.urCEMAtzJl ++++ rm /tmp/tmp.xcc15qCvzr /tmp/tmp.urCEMAtzJl ++++ return 0 +++ kubectl_bin -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NTBTmN96ZW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MC9biKpeaS +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.NTBTmN96ZW +++ cat /tmp/tmp.MC9biKpeaS +++ rm /tmp/tmp.NTBTmN96ZW /tmp/tmp.MC9biKpeaS +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.ns-mode-4621-disabled + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.ns-mode-4621-disabled + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6uRvyQs9Pp +++ mktemp ++ local LAST_ERR=/tmp/tmp.qusIu8k2gu ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6uRvyQs9Pp ++ cat /tmp/tmp.qusIu8k2gu ++ rm /tmp/tmp.6uRvyQs9Pp /tmp/tmp.qusIu8k2gu ++ return 0 + local client_container=pg-client-76df9df876-7mk88 + kubectl_bin exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.oFAj2IldOO ++ mktemp + local LAST_ERR=/tmp/tmp.oz890ZnctP + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + 
KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oFAj2IldOO + cat /tmp/tmp.oz890ZnctP + rm /tmp/tmp.oFAj2IldOO /tmp/tmp.oz890ZnctP + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=ns-mode-4621-disabled +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g9zefjbF6v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W7tr15UWMe +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.g9zefjbF6v +++ cat /tmp/tmp.W7tr15UWMe +++ rm /tmp/tmp.g9zefjbF6v /tmp/tmp.W7tr15UWMe +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7CYCJyvl8f ++++ mktemp +++ local LAST_ERR=/tmp/tmp.noeKN7vPDy +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.7CYCJyvl8f +++ cat /tmp/tmp.noeKN7vPDy +++ rm /tmp/tmp.7CYCJyvl8f /tmp/tmp.noeKN7vPDy +++ return 0 +++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.utt49ZQ8kv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YORR1OVhVl +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.utt49ZQ8kv +++ cat /tmp/tmp.YORR1OVhVl +++ rm /tmp/tmp.utt49ZQ8kv /tmp/tmp.YORR1OVhVl +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nqz548ihBa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5WzBzwGMZG ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n ns-mode-4621-disabled get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.nqz548ihBa ++++ cat /tmp/tmp.5WzBzwGMZG ++++ rm /tmp/tmp.nqz548ihBa /tmp/tmp.5WzBzwGMZG ++++ return 0 +++ kubectl_bin -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DuZqp2debu ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.PgZw2MkLz4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n ns-mode-4621-disabled get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.DuZqp2debu +++ cat /tmp/tmp.PgZw2MkLz4 +++ rm /tmp/tmp.DuZqp2debu /tmp/tmp.PgZw2MkLz4 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.ns-mode-4621-disabled + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.ns-mode-4621-disabled + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.loTWQUxBCg +++ mktemp ++ local LAST_ERR=/tmp/tmp.RFRDZ83bHN ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n ns-mode-4621-disabled get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.loTWQUxBCg ++ cat /tmp/tmp.RFRDZ83bHN ++ rm /tmp/tmp.loTWQUxBCg /tmp/tmp.RFRDZ83bHN ++ return 0 + local client_container=pg-client-76df9df876-7mk88 + kubectl_bin exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.DxqbKA4pGJ ++ mktemp + local LAST_ERR=/tmp/tmp.ZJx4E9fd7s + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-7mk88 -n ns-mode-4621-disabled -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.ns-mode-4621-disabled.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.DxqbKA4pGJ + cat /tmp/tmp.ZJx4E9fd7s + rm /tmp/tmp.DxqbKA4pGJ /tmp/tmp.ZJx4E9fd7s + return 0 + sleep 10 + destroy_operator + kubectl_bin -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.rYZwGI88d7 ++ mktemp + local LAST_ERR=/tmp/tmp.2rNo7YPIZc + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.rYZwGI88d7 + cat 
/tmp/tmp.2rNo7YPIZc Warning: deleting cluster-scoped resources, not scoped to the provided namespace Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.rYZwGI88d7 /tmp/tmp.2rNo7YPIZc + return 1 + true + yq w -d4 /tmp/tmp.QW8obiMqqX/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin -n ns-mode-4621-disabled apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.eCI0R518gy ++ mktemp + local LAST_ERR=/tmp/tmp.UKImJ4NNRs + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eCI0R518gy serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.UKImJ4NNRs + rm /tmp/tmp.eCI0R518gy /tmp/tmp.UKImJ4NNRs + return 0 + wait_job_completion pgo-deploy true ns-mode-4621-disabled + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621-disabled + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
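The block above is the operator teardown: the deployer manifest is deleted best-effort (the NotFound errors just mean it is already gone), then the same manifest is rewritten with yq so the deployer job runs in uninstall mode, applied, and waited on. A sketch of that flow follows; TMP_DIR stands in for the mktemp directory of this run, and the redirect into operator-destroy.yaml is assumed since it is not visible in the trace.

# Sketch of the destroy_operator flow seen above (TMP_DIR = /tmp/tmp.QW8obiMqqX here).
destroy_operator() {
    # best-effort delete; NotFound only means the deployer was never (re)installed
    kubectl -n "$NAMESPACE" delete -f "$TMP_DIR/operator.yaml" || true

    # reuse the deployer job for uninstall: flip env[0].value of the job container
    # (document index 4 of the multi-document manifest) to "uninstall"
    yq w -d4 "$TMP_DIR/operator.yaml" \
        'spec.template.spec.containers[0].env[0].value' uninstall \
        > "$TMP_DIR/operator-destroy.yaml"          # output redirect assumed

    kubectl -n "$NAMESPACE" apply -f "$TMP_DIR/operator-destroy.yaml"
    wait_job_completion pgo-deploy true "$NAMESPACE"
    kubectl -n "$NAMESPACE" delete -f "$TMP_DIR/operator-destroy.yaml"
}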
.+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.KvmvIhuyOj ++ mktemp + local LAST_ERR=/tmp/tmp.Vf7AfpHHdN + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.KvmvIhuyOj serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.Vf7AfpHHdN Warning: deleting cluster-scoped resources, not scoped to the provided namespace + rm /tmp/tmp.KvmvIhuyOj /tmp/tmp.Vf7AfpHHdN + return 0 + destroy + grep -v level=debug + grep -v level=info + grep -v 'Getting tasks for pod' + grep -v 'Getting pods from source' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + tee /tmp/tmp.QW8obiMqqX/operator.log + sort -u ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BpP6ke8Qvc +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKxd82Bo2z ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 0 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 4 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ [[ 1 != 0 ]] ++ sleep 8 ++ set -o errexit ++ cat /tmp/tmp.BpP6ke8Qvc ++ cat /tmp/tmp.eKxd82Bo2z error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. 
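The dotted polling above is wait_job_completion: it probes the pgo-deploy job once per second until .status.succeeded reaches 1, prints a dot per attempt, and gives up after 600 iterations. A hedged sketch of that loop, with the not-found/mandatory handling simplified:

# Sketch of the wait_job_completion polling visible above: one probe per second,
# a dot per attempt, and a 600-iteration cap (roughly ten minutes).
wait_job_completion() {
    local job=$1
    local mandatory=${2:-true}
    local ns=$3
    local retry=0

    until [[ $(kubectl -n "$ns" get "job/$job" \
                  -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
        # if the job object vanished and waiting is optional, stop here
        if kubectl -n "$ns" get "job/$job" 2>&1 | grep -qio 'not found'; then
            [[ $mandatory == true ]] || return 0
        fi
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [[ $retry -ge 600 ]]; then
            echo " job/$job did not succeed in time"
            return 1
        fi
    done
    echo
}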
Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} ++ rm /tmp/tmp.BpP6ke8Qvc /tmp/tmp.eKxd82Bo2z ++ return 1 + kubectl_bin -n ns-mode-4621-disabled logs -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.tIU5ulIH4f ++ mktemp + local LAST_ERR=/tmp/tmp.XwwJ5SWUgA + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled logs -c operator + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled logs -c operator + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled logs -c operator + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.tIU5ulIH4f + cat /tmp/tmp.XwwJ5SWUgA error: expected 'logs [-f] [-p] (POD | TYPE/NAME) [-c CONTAINER]'. POD or TYPE/NAME is a required argument for the logs command See 'kubectl logs -h' for help and examples + rm /tmp/tmp.tIU5ulIH4f /tmp/tmp.XwwJ5SWUgA + return 1 + destroy_operator + kubectl_bin -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.v3wYNk8k4j ++ mktemp + local LAST_ERR=/tmp/tmp.f7ZISmIo4Q + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.v3wYNk8k4j + cat /tmp/tmp.f7ZISmIo4Q Warning: deleting cluster-scoped resources, not scoped to the provided namespace Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.QW8obiMqqX/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.v3wYNk8k4j /tmp/tmp.f7ZISmIo4Q + return 1 + true + yq w -d4 /tmp/tmp.QW8obiMqqX/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin -n ns-mode-4621-disabled apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ST51uIG5O5 ++ mktemp + local LAST_ERR=/tmp/tmp.TK966cDe8b + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n 
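Before the second teardown pass, the destroy step tries to collect and de-noise the operator log; here it fails because no operator pod is left, so the jsonpath lookup returns an empty list and kubectl logs is then invoked without a pod name. The filtering pipeline itself looks roughly like the sketch below (filter patterns and the sed expression are copied from the trace; the exact plumbing around tee/sort is simplified).

# Sketch of the operator log collection attempted above: find the operator pod by label,
# strip debug/info noise and timestamps, de-duplicate, and keep a copy in TMP_DIR.
operator_pod=$(kubectl get pods --selector=name=postgres-operator \
    -o 'jsonpath={.items[].metadata.name}')          # fails in this run: no items left

kubectl -n "$NAMESPACE" logs "$operator_pod" -c operator \
    | grep -v level=debug \
    | grep -v level=info \
    | grep -v 'Getting tasks for pod' \
    | grep -v 'Getting pods from source' \
    | grep -v 'the object has been modified' \
    | grep -v 'get backup status: Job.batch' \
    | sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
    | sort -u \
    | tee "$TMP_DIR/operator.log"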
ns-mode-4621-disabled apply -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ST51uIG5O5 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.TK966cDe8b + rm /tmp/tmp.ST51uIG5O5 /tmp/tmp.TK966cDe8b + return 0 + wait_job_completion pgo-deploy true ns-mode-4621-disabled + local job=pgo-deploy + local mandatory=true + local ns=ns-mode-4621-disabled + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n ns-mode-4621-disabled get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hJtEaAGTdD ++ mktemp + local LAST_ERR=/tmp/tmp.PzMUW8xCsE + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n ns-mode-4621-disabled delete -f /tmp/tmp.QW8obiMqqX/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hJtEaAGTdD serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.PzMUW8xCsE Warning: deleting cluster-scoped resources, not scoped to the provided namespace + rm /tmp/tmp.hJtEaAGTdD /tmp/tmp.PzMUW8xCsE + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' ++ mktemp + xargs kubectl delete ns --grace-period=0 --force=true + awk '{print$1}' + local LAST_OUT=/tmp/tmp.qw2w1vAr0b ++ mktemp + local LAST_ERR=/tmp/tmp.sl3JpJK8Xx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qw2w1vAr0b + cat /tmp/tmp.sl3JpJK8Xx + rm /tmp/tmp.qw2w1vAr0b /tmp/tmp.sl3JpJK8Xx + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
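The run then sweeps away every non-system namespace; the warning about immediate deletion is expected because --grace-period=0 with --force does not wait for the namespaces to actually terminate. The sweep boils down to this pipeline (the exclusion pattern is copied from the trace):

# Force-delete all test namespaces, keeping kube-*, default, openshift* and the header row.
kubectl get ns \
    | egrep -v '^kube-|^default|Terminating|openshift|^NAME' \
    | awk '{print $1}' \
    | xargs kubectl delete ns --grace-period=0 --force=true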
namespace "ns-mode-4621" force deleted namespace "ns-mode-4621-disabled" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.HbfG67HsC0 ++ mktemp + local LAST_ERR=/tmp/tmp.BzpJHDZCo5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.HbfG67HsC0 + cat /tmp/tmp.BzpJHDZCo5 Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.HbfG67HsC0 /tmp/tmp.BzpJHDZCo5 + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.KQgqa5n6El ++ mktemp + local LAST_ERR=/tmp/tmp.3KpZZt2DnA + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.KQgqa5n6El + cat /tmp/tmp.3KpZZt2DnA Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.KQgqa5n6El /tmp/tmp.3KpZZt2DnA + return 1 + true + rm -rf /tmp/tmp.QW8obiMqqX