++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/affinity.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/affinity.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + create_namespace affinity-4406 + local namespace=affinity-4406 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + cleanup_rbac + awk '{print$1}' + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.43jaNQ3yXC ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.I2MUuuJ4eD + local LAST_OUT=/tmp/tmp.ZSgtVgewZc + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + local LAST_ERR=/tmp/tmp.4P27iLcOHY + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.43jaNQ3yXC + cat /tmp/tmp.I2MUuuJ4eD + rm /tmp/tmp.43jaNQ3yXC /tmp/tmp.I2MUuuJ4eD + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.ZSgtVgewZc + cat /tmp/tmp.4P27iLcOHY Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.ZSgtVgewZc /tmp/tmp.4P27iLcOHY + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.4b1wgzY1Hi ++ mktemp + local LAST_ERR=/tmp/tmp.KyWOGj7OQE + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + 
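# A minimal sketch of the kubectl_bin retry wrapper, reconstructed from the
# xtrace output above (mktemp stdout/stderr buffers, three attempts with a
# 0s/4s/8s back-off, output replayed after the loop). The function body is an
# assumption; only behaviour visible in the trace is reproduced.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        KUBECONFIG=${KUBECONFIG} kubectl "$@" 1>"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        if [[ ${exit_status} != 0 ]]; then
            sleep $((timeout * i))    # 0, 4, 8 seconds between attempts
        else
            break
        fi
    done
    set -o errexit
    cat "${LAST_OUT}"
    cat "${LAST_ERR}"
    rm "${LAST_OUT}" "${LAST_ERR}"
    return ${exit_status}
}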
KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.4b1wgzY1Hi + cat /tmp/tmp.KyWOGj7OQE Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.4b1wgzY1Hi /tmp/tmp.KyWOGj7OQE + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.PHbH6ComQC ++ mktemp + local LAST_ERR=/tmp/tmp.YO66ITDuRr + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace affinity-4406 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace affinity-4406 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace affinity-4406 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.PHbH6ComQC + cat /tmp/tmp.YO66ITDuRr Error from server (NotFound): namespaces "affinity-4406" not found + rm /tmp/tmp.PHbH6ComQC /tmp/tmp.YO66ITDuRr + return 1 + : + wait_for_delete namespace/affinity-4406 + local res=namespace/affinity-4406 + set +o xtrace namespace/affinity-4406 - Error from server (NotFound): namespaces "affinity-4406" not found + kubectl_bin create namespace affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.WX0xL351Yr ++ mktemp + local LAST_ERR=/tmp/tmp.sCw1n6cmaw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace affinity-4406 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WX0xL351Yr namespace/affinity-4406 created + cat /tmp/tmp.sCw1n6cmaw + rm /tmp/tmp.WX0xL351Yr /tmp/tmp.sCw1n6cmaw + return 0 ++ 
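# The namespace bootstrap traced above, condensed into a hedged sketch of
# create_namespace/cleanup_rbac: leftover test namespaces and the pgo/chaos-mesh
# cluster-scoped RBAC are removed (NotFound errors are tolerated), the target
# namespace is recreated and the kubeconfig context is pointed at it. Helper
# names follow the trace; argument handling is assumed.
create_namespace() {
    local namespace=$1
    kubectl_bin get ns \
        | egrep -v '^kube-|^default|Terminating|openshift|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns || true
    cleanup_rbac || true    # pgo-* and chaos-mesh clusterroles, bindings, webhooks
    kubectl_bin delete namespace "${namespace}" || :
    wait_for_delete "namespace/${namespace}"
    kubectl_bin create namespace "${namespace}"
    kubectl_bin config set-context "$(kubectl config current-context)" --namespace="${namespace}"
}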
kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.0P5Q3Oqf1P +++ mktemp ++ local LAST_ERR=/tmp/tmp.eCzR3Y1hfg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.0P5Q3Oqf1P ++ cat /tmp/tmp.eCzR3Y1hfg ++ rm /tmp/tmp.0P5Q3Oqf1P /tmp/tmp.eCzR3Y1hfg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.z3xLVJ8eMb ++ mktemp + local LAST_ERR=/tmp/tmp.5VZzlEfkbz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=affinity-4406 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.z3xLVJ8eMb Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. + cat /tmp/tmp.5VZzlEfkbz + rm /tmp/tmp.z3xLVJ8eMb /tmp/tmp.5VZzlEfkbz + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=affinity-4406 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install affinity-4406 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=affinity-4406 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "affinity-4406"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "affinity-4406"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f2 ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace affinity-4406 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w -d3 - 'subjects[0].namespace' affinity-4406 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.40ZIkkeOze/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" 
delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "affinity-4406" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "affinity-4406"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qtNRAH7VXd ++ mktemp + local LAST_ERR=/tmp/tmp.98d5udwadx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.40ZIkkeOze/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qtNRAH7VXd serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.98d5udwadx + rm /tmp/tmp.qtNRAH7VXd /tmp/tmp.98d5udwadx + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
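# The polling loop running above is wait_job_completion; a rough reconstruction
# from the trace: wait for the Job object to appear, then check
# .status.succeeded once per second (one dot per attempt), giving up after 600
# tries. Each pass also greps the name lookup for "not found" to notice a
# deleted Job. The timeout handling is an assumption.
wait_job_completion() {
    local job=$1 mandatory=${2:-true} ns=${3:-affinity-4406} retry=0
    until kubectl -n "${ns}" get "job/${job}" -o 'jsonpath={.metadata.name}' 2>/dev/null | grep -q .; do
        sleep 1; echo -n .; let retry+=1
        [[ ${retry} -ge 120 ]] && return 1    # assumed give-up behaviour
    done
    retry=0
    until [[ $(kubectl_bin -n "${ns}" get "job/${job}" -o 'jsonpath={.status.succeeded}') == "1" ]]; do
        sleep 1; echo -n .; let retry+=1
        [[ ${retry} -ge 600 ]] && return 1    # assumed give-up behaviour
    done
    echo
}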
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.40ZIkkeOze/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.pkzLlZ7eAN ++ mktemp + local LAST_ERR=/tmp/tmp.im7Cl7FiFJ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.40ZIkkeOze/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.pkzLlZ7eAN serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.im7Cl7FiFJ + rm /tmp/tmp.pkzLlZ7eAN /tmp/tmp.im7Cl7FiFJ + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sMRJ8eeOnk +++ mktemp ++ local LAST_ERR=/tmp/tmp.mfp0GrPF4k ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.sMRJ8eeOnk ++ cat /tmp/tmp.mfp0GrPF4k ++ rm /tmp/tmp.sMRJ8eeOnk /tmp/tmp.mfp0GrPF4k ++ return 0 + wait_pod postgres-operator-58c86b9b64-qlmzd + local pod=postgres-operator-58c86b9b64-qlmzd + local ns= + set +o xtrace postgres-operator-58c86b9b64-qlmzdtrue + [[ -n '' ]] + cluster=some-name + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local cluster=some-name + local 
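# The deployer flow that just finished, condensed: the generated operator.yaml
# (ServiceAccount, ClusterRole/Binding, ConfigMap and the pgo-deploy Job) is
# applied, the Job is awaited, the deployer objects are deleted again, and the
# operator pod is located by its label. Paths and names are taken verbatim from
# the trace; this is a summary, not the helper's full body.
kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/operator.yaml
wait_job_completion pgo-deploy true
kubectl_bin delete -f /tmp/tmp.40ZIkkeOze/operator.yaml
operator_pod=$(kubectl_bin get pods --selector=name=postgres-operator \
    -o 'jsonpath={.items[].metadata.name}')
wait_pod "${operator_pod}"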
config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=affinity-4406 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name affinity-4406 + local cluster=some-name + local namespace=affinity-4406 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n affinity-4406 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.eYRNcEg0FM ++ mktemp + local LAST_ERR=/tmp/tmp.xGkf7SJWDj + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eYRNcEg0FM secret/some-name-users-secret created + cat /tmp/tmp.xGkf7SJWDj + rm /tmp/tmp.eYRNcEg0FM /tmp/tmp.xGkf7SJWDj + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n affinity-4406 apply -f - + yq w - metadata.name some-name-pmm-secret ++ mktemp + local LAST_OUT=/tmp/tmp.e9X0Qdw25H ++ mktemp + local LAST_ERR=/tmp/tmp.434Vh98Okp + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.e9X0Qdw25H secret/some-name-pmm-secret created + cat /tmp/tmp.434Vh98Okp + rm /tmp/tmp.e9X0Qdw25H /tmp/tmp.434Vh98Okp + return 0 + kubectl_bin -n affinity-4406 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.p5avbn6IIP ++ mktemp + local LAST_ERR=/tmp/tmp.bdkpJ7V8wJ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.p5avbn6IIP deployment.apps/pg-client created + cat /tmp/tmp.bdkpJ7V8wJ + rm /tmp/tmp.p5avbn6IIP /tmp/tmp.bdkpJ7V8wJ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.labels.deployment-name some-name + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.name some-name + yq w - spec.secretsName some-name-users-secret + yq w - metadata.name some-name + yq w - spec.keepData 
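# create_user_secrets as traced above, in short: the documents of
# deploy/users-secret.yaml are renamed per cluster with yq (v3 r/w syntax,
# matching the calls in the log) and applied into the namespace; the
# stringData password set here is the test default from the trace.
yq r -d0 deploy/users-secret.yaml \
    | yq w - metadata.name some-name-users-secret \
    | yq w - stringData.some-name some-name_pass \
    | kubectl_bin -n affinity-4406 apply -f -
yq r -d1 deploy/users-secret.yaml \
    | yq w - metadata.name some-name-pmm-secret \
    | kubectl_bin -n affinity-4406 apply -f -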
false + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.keepBackups false + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - metadata.labels.pg-cluster some-name + yq w - spec.user some-name + yq w - spec.name some-name + yq w - spec.database some-name + yq w - spec.clustername some-name + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.QU7kzNAzI1 ++ mktemp + local LAST_ERR=/tmp/tmp.POezdFAEnu + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.QU7kzNAzI1 perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.POezdFAEnu + rm /tmp/tmp.QU7kzNAzI1 /tmp/tmp.POezdFAEnu + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name.......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
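# The apply_cluster step traced above, abbreviated: deploy/cr.yaml is piped
# through a chain of yq writes that stamp the cluster name, secret name and
# image tags before the result lands in the temp dir and is applied. Only a
# few of the yq calls are repeated here; the chain order is not significant.
yq w deploy/cr.yaml metadata.annotations.current-primary some-name \
    | yq w - metadata.name some-name \
    | yq w - metadata.labels.pg-cluster some-name \
    | yq w - spec.clustername some-name \
    | yq w - spec.database some-name \
    | yq w - spec.user some-name \
    | yq w - spec.secretsName some-name-users-secret \
    | yq w - spec.keepData false \
    | yq w - spec.keepBackups false \
    | yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha \
    > /tmp/tmp.40ZIkkeOze/cr.yaml
kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406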
.+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-4406 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ripFYWb1mN +++ mktemp ++ local LAST_ERR=/tmp/tmp.lpQrhS3C9c ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ripFYWb1mN ++ cat /tmp/tmp.lpQrhS3C9c ++ rm /tmp/tmp.ripFYWb1mN /tmp/tmp.lpQrhS3C9c ++ return 0 + status_query='kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MzpcPdbs1M ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fsGF6wSJ7w +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n 
affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.MzpcPdbs1M +++ cat /tmp/tmp.fsGF6wSJ7w +++ rm /tmp/tmp.MzpcPdbs1M /tmp/tmp.fsGF6wSJ7w +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yrJHrQuInp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UCwnxDdObc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.yrJHrQuInp +++ cat /tmp/tmp.UCwnxDdObc +++ rm /tmp/tmp.yrJHrQuInp /tmp/tmp.UCwnxDdObc +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lwbjbGXoex ++++ mktemp +++ local LAST_ERR=/tmp/tmp.t6EMNg09Hi +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.lwbjbGXoex +++ cat /tmp/tmp.t6EMNg09Hi +++ rm /tmp/tmp.lwbjbGXoex /tmp/tmp.t6EMNg09Hi +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2SHGWRYqKH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gyIu7qtmfE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.2SHGWRYqKH +++ cat /tmp/tmp.gyIu7qtmfE +++ rm /tmp/tmp.2SHGWRYqKH /tmp/tmp.gyIu7qtmfE +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mmMeseVT3R +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.WVFPvJkPk3 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.mmMeseVT3R ++++ cat /tmp/tmp.WVFPvJkPk3 ++++ rm /tmp/tmp.mmMeseVT3R /tmp/tmp.WVFPvJkPk3 ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rNhGvKM6Q4 ++++ mktemp +++ 
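# wait_cluster_consistency/wait_cluster_status as it appears from the trace:
# the status query is chosen by the pgo-version label and, for this version,
# reads .status.PGCluster.state until it reports "pgcluster Initialized". The
# retry interval below is an assumption; the trace only records the pass that
# already matched.
wait_cluster_status() {
    local cluster_name=$1 target_status=$2 target_namespace=${3:-affinity-4406}
    local status_query="kubectl_bin -n ${target_namespace} get perconapgcluster/${cluster_name} -o jsonpath='{.status.PGCluster.state}'"
    until [[ "$(eval ${status_query})" == "${target_status}" ]]; do
        sleep 10
    done
    sleep 10
}
wait_cluster_status some-name 'pgcluster Initialized'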
local LAST_ERR=/tmp/tmp.AG8J1D53jZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.rNhGvKM6Q4 +++ cat /tmp/tmp.AG8J1D53jZ +++ rm /tmp/tmp.rNhGvKM6Q4 /tmp/tmp.AG8J1D53jZ +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.affinity-4406 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jOAQSWHagC +++ mktemp ++ local LAST_ERR=/tmp/tmp.cs8GajQt3Q ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jOAQSWHagC ++ cat /tmp/tmp.cs8GajQt3Q ++ rm /tmp/tmp.jOAQSWHagC /tmp/tmp.cs8GajQt3Q ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.22UHFqltLF ++ mktemp + local LAST_ERR=/tmp/tmp.AAOusrYBQB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.22UHFqltLF + cat /tmp/tmp.AAOusrYBQB + rm /tmp/tmp.22UHFqltLF /tmp/tmp.AAOusrYBQB + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1ocPt1mVar ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bOzNEtj0a0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1ocPt1mVar +++ cat /tmp/tmp.bOzNEtj0a0 +++ rm /tmp/tmp.1ocPt1mVar /tmp/tmp.bOzNEtj0a0 +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.OhjNr2qx2g ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uUp149Hyrr +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OhjNr2qx2g +++ cat /tmp/tmp.uUp149Hyrr +++ rm /tmp/tmp.OhjNr2qx2g /tmp/tmp.uUp149Hyrr +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9RbFHg7t86 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.B1ATUVCrYl +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9RbFHg7t86 +++ cat /tmp/tmp.B1ATUVCrYl +++ rm /tmp/tmp.9RbFHg7t86 /tmp/tmp.B1ATUVCrYl +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.m459OKun1x +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LwGA6hGce9 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.m459OKun1x ++++ cat /tmp/tmp.LwGA6hGce9 ++++ rm /tmp/tmp.m459OKun1x /tmp/tmp.LwGA6hGce9 ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vzTohWQrpo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6rO8kWlG5I +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.vzTohWQrpo +++ cat /tmp/tmp.6rO8kWlG5I +++ rm /tmp/tmp.vzTohWQrpo /tmp/tmp.6rO8kWlG5I +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.affinity-4406 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6P2EXk5VZT +++ mktemp ++ local LAST_ERR=/tmp/tmp.aOGCJuG8aG ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6P2EXk5VZT ++ cat /tmp/tmp.aOGCJuG8aG ++ rm /tmp/tmp.6P2EXk5VZT /tmp/tmp.aOGCJuG8aG ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c 
myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.y42zkFR9h8 ++ mktemp + local LAST_ERR=/tmp/tmp.aal0FVHUI5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.y42zkFR9h8 + cat /tmp/tmp.aal0FVHUI5 + rm /tmp/tmp.y42zkFR9h8 /tmp/tmp.aal0FVHUI5 + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Hq2XZaj8Sf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uaqckbs20d +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Hq2XZaj8Sf +++ cat /tmp/tmp.uaqckbs20d +++ rm /tmp/tmp.Hq2XZaj8Sf /tmp/tmp.uaqckbs20d +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.os3ihfLdeZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VUhzd1Exed +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.os3ihfLdeZ +++ cat /tmp/tmp.VUhzd1Exed +++ rm /tmp/tmp.os3ihfLdeZ /tmp/tmp.VUhzd1Exed +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AxoNsoMSdc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5Kz216GijF +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.AxoNsoMSdc +++ cat /tmp/tmp.5Kz216GijF +++ rm /tmp/tmp.AxoNsoMSdc /tmp/tmp.5Kz216GijF +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qf1KOXxAKE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5oqigHzpTD ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.qf1KOXxAKE ++++ 
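# run_psql as shown by the exec lines above: the statement is piped into psql
# inside the pg-client pod, against a URI built from the user, the password
# read from the cluster's users Secret (base64-decoded .data.postgres) and the
# cluster service name. The $namespace variable stands in for the value the
# helper resolves itself.
run_psql() {
    local command=$1 uri=$2 driver=${3:-postgres} suffix=${4:-.svc.cluster.local}
    local client_container
    client_container=$(kubectl_bin -n "${namespace}" get pods --selector=name=pg-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl_bin exec "${client_container}" -n "${namespace}" -- \
        bash -c "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q ${driver}://${uri}${suffix}"
}
pass=$(kubectl_bin -n affinity-4406 get secret/some-name-users-secret \
    -o 'jsonpath={.data.postgres}' | base64 -d)
run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' "postgres:${pass}@some-name.affinity-4406"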
cat /tmp/tmp.5oqigHzpTD ++++ rm /tmp/tmp.qf1KOXxAKE /tmp/tmp.5oqigHzpTD ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.leVg0B5dF2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PCLXarY7Ct +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.leVg0B5dF2 +++ cat /tmp/tmp.PCLXarY7Ct +++ rm /tmp/tmp.leVg0B5dF2 /tmp/tmp.PCLXarY7Ct +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.affinity-4406 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XlzrcRskPH +++ mktemp ++ local LAST_ERR=/tmp/tmp.W6t1zkyGAV ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.XlzrcRskPH ++ cat /tmp/tmp.W6t1zkyGAV ++ rm /tmp/tmp.XlzrcRskPH /tmp/tmp.W6t1zkyGAV ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Z4DKwcuetM ++ mktemp + local LAST_ERR=/tmp/tmp.NwP2gDI2tb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Z4DKwcuetM + cat /tmp/tmp.NwP2gDI2tb + rm /tmp/tmp.Z4DKwcuetM /tmp/tmp.NwP2gDI2tb + return 0 + sleep 10 + compare_kubectl deployment/some-name + local resource=deployment/some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name + yq d - '**.creationTimestamp' ++ mktemp + yq d - '**.namespace' + local LAST_OUT=/tmp/tmp.LTyNxHyezh + yq d - '**.uid' + yq d - metadata.selfLink + yq d - metadata.resourceVersion ++ mktemp + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - metadata.deletionTimestamp + yq d - 
'metadata.annotations."k8s.v1.cni.cncf.io*"' + local LAST_ERR=/tmp/tmp.P9Xd0EFURH + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.finalizers' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - spec.nodeName + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."kubernetes.io/pvc-protection"' ++ seq 0 2 + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.creationTimestamp' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name + yq d - spec.volumeMode + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - status + yq d - '**."percona.com/*"' + yq d - '**.(name==suffix)' + yq d - '**.volumeName' + yq d - '**.enableServiceLinks' + yq d - '**.imagePullSecrets' + yq d - '**.(name==COMMAND_OPTS)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.(name==PODNAME)' + yq d - '**.ipFamilies' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(name==NAMESPACE)' + yq d - '**.ipFamilyPolicy' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.storageClassName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.internalTrafficPolicy' + yq d - '**.nodePort' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.controller-uid' + yq d - '**.healthCheckNodePort' + yq d - '**.preemptionPolicy' + yq d - metadata.managedFields + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.procMount' + yq d - '**.dataSource' + yq d - '**.clusterIPs' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LTyNxHyezh + cat /tmp/tmp.P9Xd0EFURH + rm /tmp/tmp.LTyNxHyezh /tmp/tmp.P9Xd0EFURH + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name.yml /tmp/tmp.40ZIkkeOze/deployment_some-name.yml + compare_kubectl deployment/some-name-backrest-shared-repo + local resource=deployment/some-name-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-backrest-shared-repo.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-backrest-shared-repo + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - '**.uid' + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."kubernetes.io/psp"' + local LAST_OUT=/tmp/tmp.uWB18UzLsp + yq d - '**.image' + yq d - '**.clusterIP' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.clusterIPs' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.creationTimestamp' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' ++ mktemp + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - spec.volumeMode + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.ipFamilies' + yq d - '**.finalizers' + yq d - '**.(name==PODNAME)' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - spec.nodeName + yq d - '**.healthCheckNodePort' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."batch.kubernetes.io/controller-uid"' + 
yq d - '**.dataSource' + yq d - '**.nodePort' + local LAST_ERR=/tmp/tmp.yisYQ4NPsQ + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.imagePullSecrets' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.enableServiceLinks' + yq d - '**.(name==COMMAND_OPTS)' + yq d - metadata.selfLink + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.internalTrafficPolicy' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.ipFamilyPolicy' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - status ++ seq 0 2 + yq d - '**.(name==suffix)' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name-backrest-shared-repo + yq d - '**.(name==NAMESPACE)' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.uWB18UzLsp + cat /tmp/tmp.yisYQ4NPsQ + rm /tmp/tmp.uWB18UzLsp /tmp/tmp.yisYQ4NPsQ + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-backrest-shared-repo.yml /tmp/tmp.40ZIkkeOze/deployment_some-name-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name + local resource=job/backrest-backup-some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name.yml + local new_result=/tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml job/backrest-backup-some-name + yq d - '**.creationTimestamp' ++ mktemp + yq d - '**.namespace' + yq d - metadata.resourceVersion + yq d - metadata.deletionTimestamp + local LAST_OUT=/tmp/tmp.jvZzteAVmF + yq d - metadata.selfLink + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' ++ mktemp + yq d - '**.finalizers' + yq d - '**.procMount' + local LAST_ERR=/tmp/tmp.YIjNJ81MCv + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.storageClassName' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.volumeName' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."percona.com/*"' + yq d - spec.volumeMode + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.nodePort' + yq d - '**.imagePullSecrets' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(volumeMode==Filesystem).volumeMode' ++ seq 0 2 + yq d - '**.preemptionPolicy' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.healthCheckNodePort' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.image' + yq d - '**.enableServiceLinks' + yq d - '**.internalTrafficPolicy' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name + yq d - '**.clusterIP' + yq d - status + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.controller-uid' + yq d - 
'**.(name==suffix)' + yq d - '**.ipFamilies' + yq d - '**.ipFamilyPolicy' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==PODNAME)' + yq d - '**.creationTimestamp' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.uid' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.jvZzteAVmF + cat /tmp/tmp.YIjNJ81MCv + rm /tmp/tmp.jvZzteAVmF /tmp/tmp.YIjNJ81MCv + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml kind ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml metadata.annotations + [[ x{} == \x ]] ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml metadata.annotations + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml metadata.generation + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml spec.completionMode + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name.yml /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name.yml + create_backup some-name preferred + local cluster=some-name + local bckp_prefix=preferred + local bckp_type=full + yq w - metadata.name preferred-some-name + yq w - spec.name preferred-some-name + yq w - spec.namespace affinity-4406 + yq w - spec.parameters.job-name preferred-some-name + kubectl_bin apply -f - + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.pg-cluster some-name ++ mktemp ++ kubectl_bin get pods --selector=name=some-name-backrest-shared-repo,pg-cluster=some-name -o 'jsonpath={.items[].metadata.name}' + local LAST_OUT=/tmp/tmp.aTa1fF3l3z + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name ++ mktemp +++ mktemp + local LAST_ERR=/tmp/tmp.98BD0JzuBH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_OUT=/tmp/tmp.jrgiSCAd6X +++ mktemp + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - ++ local LAST_ERR=/tmp/tmp.HxSPVeAxfg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=some-name-backrest-shared-repo,pg-cluster=some-name -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jrgiSCAd6X ++ cat /tmp/tmp.HxSPVeAxfg ++ rm /tmp/tmp.jrgiSCAd6X /tmp/tmp.HxSPVeAxfg ++ return 0 + yq w - spec.parameters.podname some-name-backrest-shared-repo-6f8f947dbc-gvhsr + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.aTa1fF3l3z pgtask.pg.percona.com/preferred-some-name created + cat /tmp/tmp.98BD0JzuBH + rm /tmp/tmp.aTa1fF3l3z /tmp/tmp.98BD0JzuBH + return 0 + sleep 10 + wait_job_completion preferred-some-name + local job=preferred-some-name + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' preferred-some-name+ retry=0 ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ 
'' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete perconapgcluster some-name ++ mktemp + local LAST_OUT=/tmp/tmp.XRTMrlVjHa ++ mktemp + local LAST_ERR=/tmp/tmp.KX1jBtb7ju + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete perconapgcluster some-name + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XRTMrlVjHa perconapgcluster.pg.percona.com "some-name" deleted + cat /tmp/tmp.KX1jBtb7ju + rm /tmp/tmp.XRTMrlVjHa /tmp/tmp.KX1jBtb7ju + return 0 + sleep 30 + cluster=some-name-required + spinup_pgcluster some-name-required /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml '' '' required + local cluster=some-name-required + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=required + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=affinity-4406 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-required affinity-4406 + local cluster=some-name-required + local namespace=affinity-4406 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + yq w - metadata.name some-name-required-users-secret + kubectl_bin -n affinity-4406 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KdqdepJlJB ++ mktemp + local LAST_ERR=/tmp/tmp.TGvkuwvMZq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.KdqdepJlJB secret/some-name-required-users-secret created + cat /tmp/tmp.TGvkuwvMZq + rm /tmp/tmp.KdqdepJlJB /tmp/tmp.TGvkuwvMZq + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n affinity-4406 apply -f - + yq w - metadata.name some-name-required-pmm-secret ++ mktemp + local LAST_OUT=/tmp/tmp.nkcN6ceawP ++ mktemp + local LAST_ERR=/tmp/tmp.Yb6W1fw6uT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nkcN6ceawP secret/some-name-required-pmm-secret created + cat /tmp/tmp.Yb6W1fw6uT + rm /tmp/tmp.nkcN6ceawP /tmp/tmp.Yb6W1fw6uT + return 0 + kubectl_bin -n affinity-4406 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.TRqdQjhslp ++ mktemp + local LAST_ERR=/tmp/tmp.OvM7SSN7HF + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f 
/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TRqdQjhslp deployment.apps/pg-client unchanged + cat /tmp/tmp.OvM7SSN7HF + rm /tmp/tmp.TRqdQjhslp /tmp/tmp.OvM7SSN7HF + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name-required false false false false required false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name-required + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=required + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name-required + yq w - metadata.labels.crunchy-pgha-scope some-name-required + yq w - spec.database some-name-required + yq w - spec.clustername some-name-required + yq w - metadata.labels.pg-cluster some-name-required + yq w - metadata.name some-name-required + yq w - metadata.labels.name some-name-required + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.keepBackups false + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.user some-name + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.secretsName some-name-required-users-secret + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.keepData false + yq w - metadata.labels.deployment-name some-name-required + yq w - spec.name some-name-required + case ${podAntiAffinity} in + yq w /tmp/tmp.40ZIkkeOze/cr.yaml spec.pgPrimary.affinity.antiAffinityType required + yq w - spec.backup.affinity.antiAffinityType required + yq w - spec.pgBouncer.affinity.antiAffinityType required + mv /tmp/tmp.40ZIkkeOze/cr.podAffinity.yaml /tmp/tmp.40ZIkkeOze/cr.yaml + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.rvd8jJVDNF ++ mktemp + local LAST_ERR=/tmp/tmp.hrGyKavE5S + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.rvd8jJVDNF perconapgcluster.pg.percona.com/some-name-required created + cat /tmp/tmp.hrGyKavE5S + rm /tmp/tmp.rvd8jJVDNF /tmp/tmp.hrGyKavE5S + return 0 + wait_deployment some-name-required-backrest-shared-repo + local name=some-name-required-backrest-shared-repo + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name-required-backrest-shared-repo.. + wait_deployment some-name-required + local name=some-name-required + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name-required......... 
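Note: the apply_cluster step traced above builds the "required" variant of the cluster by rewriting deploy/cr.yaml with yq before applying it. A minimal sketch of that patching, assuming the yq v3 write syntax ("yq w") used throughout this log and illustrative temp-file paths (this is not the suite's exact helper):

    # Patch every component to the "required" pod anti-affinity mode (sketch only)
    yq w deploy/cr.yaml spec.pgPrimary.affinity.antiAffinityType required \
      | yq w - spec.backup.affinity.antiAffinityType required \
      | yq w - spec.pgBouncer.affinity.antiAffinityType required \
      > /tmp/cr.podAffinity.yaml
    kubectl apply -n affinity-4406 -f /tmp/cr.podAffinity.yaml   # namespace taken from this run

The operator is then expected to turn antiAffinityType=required into a required pod anti-affinity rule on the resulting Deployments and Jobs, which is what the compare_kubectl diffs later in this trace check against the expected YAML under e2e-tests/affinity/compare/.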
+ [[ false == \g\c\s\+* ]] + wait_job_completion some-name-required-stanza-create false + local job=some-name-required-stanza-create + local mandatory=false + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' some-name-required-stanza-create+ retry=0 ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-required-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name-required + local job=backrest-backup-some-name-required + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' backrest-backup-some-name-required+ retry=0 ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-required + cluster_name=some-name-required + wait_cluster_status some-name-required 'pgcluster Initialized' + local cluster_name=some-name-required + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-4406 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iM1KoJuyfo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ft9xd1ZV1E ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.iM1KoJuyfo ++ cat /tmp/tmp.Ft9xd1ZV1E ++ rm /tmp/tmp.iM1KoJuyfo /tmp/tmp.Ft9xd1ZV1E ++ return 0 + status_query='kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SekS0ShniD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5kS3n2LapZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.SekS0ShniD +++ cat /tmp/tmp.5kS3n2LapZ +++ rm /tmp/tmp.SekS0ShniD /tmp/tmp.5kS3n2LapZ +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UGCz8sPOHB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9nztFxq61Q +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UGCz8sPOHB +++ cat /tmp/tmp.9nztFxq61Q +++ rm 
/tmp/tmp.UGCz8sPOHB /tmp/tmp.9nztFxq61Q +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4BE9WIm8ge ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3Y94wEX4og +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.4BE9WIm8ge +++ cat /tmp/tmp.3Y94wEX4og +++ rm /tmp/tmp.4BE9WIm8ge /tmp/tmp.3Y94wEX4og +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.v5gm78gKQ8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0KAJYRj4nD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.v5gm78gKQ8 +++ cat /tmp/tmp.0KAJYRj4nD +++ rm /tmp/tmp.v5gm78gKQ8 /tmp/tmp.0KAJYRj4nD +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VOji7VsKRt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8RWGDaLSnv +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.VOji7VsKRt +++ cat /tmp/tmp.8RWGDaLSnv +++ rm /tmp/tmp.VOji7VsKRt /tmp/tmp.8RWGDaLSnv +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wLJw0O9C2t +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pmyE24D344 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.wLJw0O9C2t ++++ cat /tmp/tmp.pmyE24D344 ++++ rm /tmp/tmp.wLJw0O9C2t /tmp/tmp.pmyE24D344 ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wLQMktECrX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ykmg0oy6dK +++ local exit_status=0 +++ local timeout=4 +++ set +o 
errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wLQMktECrX +++ cat /tmp/tmp.Ykmg0oy6dK +++ rm /tmp/tmp.wLQMktECrX /tmp/tmp.Ykmg0oy6dK +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-required.affinity-4406 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-required.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sC4Y7JLXEy +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wajx5V80e2 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.sC4Y7JLXEy ++ cat /tmp/tmp.Wajx5V80e2 ++ rm /tmp/tmp.sC4Y7JLXEy /tmp/tmp.Wajx5V80e2 ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.M5tNj5QhpI ++ mktemp + local LAST_ERR=/tmp/tmp.vHBP1XboA6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.M5tNj5QhpI + cat /tmp/tmp.vHBP1XboA6 + rm /tmp/tmp.M5tNj5QhpI /tmp/tmp.vHBP1XboA6 + return 0 ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Tj0ylOyreN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DGbkpYJ5In +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Tj0ylOyreN +++ cat /tmp/tmp.DGbkpYJ5In +++ rm /tmp/tmp.Tj0ylOyreN /tmp/tmp.DGbkpYJ5In +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.yYCEZqSF2X ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CpnU3QnCiM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.yYCEZqSF2X +++ cat /tmp/tmp.CpnU3QnCiM +++ rm /tmp/tmp.yYCEZqSF2X /tmp/tmp.CpnU3QnCiM +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AVbe5qYxJ4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PZJiQruKH9 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.AVbe5qYxJ4 +++ cat /tmp/tmp.PZJiQruKH9 +++ rm /tmp/tmp.AVbe5qYxJ4 /tmp/tmp.PZJiQruKH9 +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bIqbTyCZC6 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.TMOuWRmK06 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.bIqbTyCZC6 ++++ cat /tmp/tmp.TMOuWRmK06 ++++ rm /tmp/tmp.bIqbTyCZC6 /tmp/tmp.TMOuWRmK06 ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HGSXPUF3il ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dl8XfoiJdh +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.HGSXPUF3il +++ cat /tmp/tmp.Dl8XfoiJdh +++ rm /tmp/tmp.HGSXPUF3il /tmp/tmp.Dl8XfoiJdh +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-required.affinity-4406 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name-required.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3hJ7Thr8MB +++ mktemp ++ local LAST_ERR=/tmp/tmp.vnBVHpqpGw ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.3hJ7Thr8MB ++ cat /tmp/tmp.vnBVHpqpGw ++ rm /tmp/tmp.3hJ7Thr8MB /tmp/tmp.vnBVHpqpGw ++ return 0 + local client_container=pg-client-76df9df876-hgk5m 
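Note: run_psql, traced next, simply feeds the SQL string to psql inside the pg-client pod over the cluster's internal service DNS. A standalone equivalent (a sketch; the pod name, password, and service host are the ones observed in this run and will differ elsewhere):

    kubectl -n affinity-4406 exec pg-client-76df9df876-hgk5m -- bash -c \
      'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' \
       | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local'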
+ kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.itm7UqZDNd ++ mktemp + local LAST_ERR=/tmp/tmp.u0dWID1cT9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.itm7UqZDNd + cat /tmp/tmp.u0dWID1cT9 + rm /tmp/tmp.itm7UqZDNd /tmp/tmp.u0dWID1cT9 + return 0 ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fgemsxYZgA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oZltSQUBpg +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fgemsxYZgA +++ cat /tmp/tmp.oZltSQUBpg +++ rm /tmp/tmp.fgemsxYZgA /tmp/tmp.oZltSQUBpg +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EPBUQW3l20 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Nb6UW0SU5N +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.EPBUQW3l20 +++ cat /tmp/tmp.Nb6UW0SU5N +++ rm /tmp/tmp.EPBUQW3l20 /tmp/tmp.Nb6UW0SU5N +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HKocTPMqS6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p1bfre05Qp +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.HKocTPMqS6 +++ cat /tmp/tmp.p1bfre05Qp +++ rm /tmp/tmp.HKocTPMqS6 /tmp/tmp.p1bfre05Qp +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lhgFMDwabo +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hdnQOaH3RM ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ 
KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.lhgFMDwabo ++++ cat /tmp/tmp.hdnQOaH3RM ++++ rm /tmp/tmp.lhgFMDwabo /tmp/tmp.hdnQOaH3RM ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kKHrSkJ25B ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xTQ4ZslrS2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kKHrSkJ25B +++ cat /tmp/tmp.xTQ4ZslrS2 +++ rm /tmp/tmp.kKHrSkJ25B /tmp/tmp.xTQ4ZslrS2 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-required.affinity-4406 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name-required.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nz81mdq7wT +++ mktemp ++ local LAST_ERR=/tmp/tmp.QjbSuwJa0X ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Nz81mdq7wT ++ cat /tmp/tmp.QjbSuwJa0X ++ rm /tmp/tmp.Nz81mdq7wT /tmp/tmp.QjbSuwJa0X ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.JmzxKi8lDQ ++ mktemp + local LAST_ERR=/tmp/tmp.QhEjGtaVtX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.JmzxKi8lDQ + cat /tmp/tmp.QhEjGtaVtX + rm /tmp/tmp.JmzxKi8lDQ /tmp/tmp.QhEjGtaVtX + return 0 + sleep 10 + compare_kubectl deployment/some-name-required + local resource=deployment/some-name-required + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-required.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name-required.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml 
deployment/some-name-required + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.selfLink + yq d - '**.uid' + local LAST_OUT=/tmp/tmp.aqYfChQUEh + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.image' + yq d - '**.clusterIPs' ++ mktemp + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**.dataSource' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**.procMount' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.enableServiceLinks' + local LAST_ERR=/tmp/tmp.o5jWRvAw2s + local exit_status=0 + local timeout=4 + set +o errexit + yq d - spec.nodeName + yq d - '**.creationTimestamp' + yq d - '**.(name==suffix)' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.nodePort' + yq d - '**.preemptionPolicy' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.clusterIP' + yq d - '**.healthCheckNodePort' + yq d - '**.ipFamilies' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.imagePullSecrets' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(name==NAMESPACE)' + yq d - status + yq d - '**."percona.com/*"' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==PODNAME)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.controller-uid' + yq d - '**.ipFamilyPolicy' + yq d - spec.volumeMode ++ seq 0 2 + yq d - '**.internalTrafficPolicy' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name-required + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.volumeName' + yq d - metadata.resourceVersion + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.aqYfChQUEh + cat /tmp/tmp.o5jWRvAw2s + rm /tmp/tmp.aqYfChQUEh /tmp/tmp.o5jWRvAw2s + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name-required.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-required.yml /tmp/tmp.40ZIkkeOze/deployment_some-name-required.yml + compare_kubectl deployment/some-name-required-backrest-shared-repo + local resource=deployment/some-name-required-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-required-backrest-shared-repo.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name-required-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-required-backrest-shared-repo + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - '**.uid' + yq d - metadata.deletionTimestamp + local LAST_OUT=/tmp/tmp.cD7fYzLBP3 + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.selfLink + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' ++ mktemp + yq d - 
'**."kubernetes.io/pvc-protection"' + local LAST_ERR=/tmp/tmp.JgjcSDGH4Q + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.nodePort' + yq d - '**.enableServiceLinks' + yq d - status + yq d - '**.internalTrafficPolicy' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.imagePullSecrets' + yq d - '**."percona.com/*"' + yq d - '**.(name==PODNAME)' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.(name==NAMESPACE)' + yq d - '**.volumeName' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.healthCheckNodePort' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' ++ seq 0 2 + yq d - '**.(name==suffix)' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name-required-backrest-shared-repo + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.controller-uid' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilyPolicy' + yq d - '**.clusterIP' + yq d - '**.image' + yq d - '**.ipFamilies' + yq d - '**.creationTimestamp' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.cD7fYzLBP3 + cat /tmp/tmp.JgjcSDGH4Q + rm /tmp/tmp.cD7fYzLBP3 /tmp/tmp.JgjcSDGH4Q + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name-required-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-required-backrest-shared-repo.yml /tmp/tmp.40ZIkkeOze/deployment_some-name-required-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name-required + local resource=job/backrest-backup-some-name-required + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name-required.yml + local new_result=/tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - '**.uid' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.resourceVersion + yq d - '**.image' + yq d - '**.procMount' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.clusterIPs' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.clusterIP' + yq d - '**.creationTimestamp' + yq d - '**.imagePullSecrets' + yq d - '**.volumeName' + yq d - '**.enableServiceLinks' + yq d - '**.(name==suffix)' + yq d - spec.nodeName + yq d - '**.nodePort' + yq d - spec.volumeMode + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."percona.com/*"' + yq d - status + yq d - '**.(name==PODNAME)' + 
yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**.ipFamilies' + yq d - '**.dataSource' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilyPolicy' + yq d - '**.internalTrafficPolicy' + yq d - metadata.deletionTimestamp + yq d - metadata.selfLink + kubectl_bin get -o yaml job/backrest-backup-some-name-required ++ mktemp + local LAST_OUT=/tmp/tmp.KPymsjZHz8 ++ mktemp + local LAST_ERR=/tmp/tmp.LS9VDQ6FLN + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name-required + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.KPymsjZHz8 + cat /tmp/tmp.LS9VDQ6FLN + rm /tmp/tmp.KPymsjZHz8 /tmp/tmp.LS9VDQ6FLN + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml kind ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml metadata.annotations + [[ x{} == \x ]] ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml metadata.annotations + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml metadata.generation + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml spec.completionMode + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name-required.yml /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-required.yml + create_backup some-name-required required + local cluster=some-name-required + local bckp_prefix=required + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name-required + yq w - spec.name required-some-name-required + yq w - spec.namespace affinity-4406 + kubectl_bin apply -f - + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.pg-cluster some-name-required + yq w - spec.parameters.job-name required-some-name-required ++ kubectl_bin get pods --selector=name=some-name-required-backrest-shared-repo,pg-cluster=some-name-required -o 'jsonpath={.items[].metadata.name}' + yq w - metadata.name required-some-name-required ++ mktemp + local LAST_OUT=/tmp/tmp.ooDiFCYwNw +++ mktemp ++ mktemp ++ local LAST_OUT=/tmp/tmp.bsNhSBgv8M + local LAST_ERR=/tmp/tmp.0Fv2via7Es + local exit_status=0 + local timeout=4 + set +o errexit +++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - ++ local LAST_ERR=/tmp/tmp.as8Qi4LQ9W ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=some-name-required-backrest-shared-repo,pg-cluster=some-name-required -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.bsNhSBgv8M ++ cat /tmp/tmp.as8Qi4LQ9W ++ rm /tmp/tmp.bsNhSBgv8M 
/tmp/tmp.as8Qi4LQ9W ++ return 0 + yq w - spec.parameters.podname some-name-required-backrest-shared-repo-8558c9b6fc-l79mk + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ooDiFCYwNw pgtask.pg.percona.com/required-some-name-required created + cat /tmp/tmp.0Fv2via7Es + rm /tmp/tmp.ooDiFCYwNw /tmp/tmp.0Fv2via7Es + return 0 + sleep 10 + wait_job_completion required-some-name-required + local job=required-some-name-required + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' required-some-name-required+ retry=0 ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete perconapgcluster some-name-required ++ mktemp + local LAST_OUT=/tmp/tmp.egm6OEMNG6 ++ mktemp + local LAST_ERR=/tmp/tmp.HKP2qZP57m + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete perconapgcluster some-name-required + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.egm6OEMNG6 perconapgcluster.pg.percona.com "some-name-required" deleted + cat /tmp/tmp.HKP2qZP57m + rm /tmp/tmp.egm6OEMNG6 /tmp/tmp.HKP2qZP57m + return 0 + sleep 30 + cluster=some-name-disabled + spinup_pgcluster some-name-disabled /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml '' '' disabled + local cluster=some-name-disabled + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=disabled + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=affinity-4406 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-disabled affinity-4406 + local cluster=some-name-disabled + local namespace=affinity-4406 + yq w - metadata.name some-name-disabled-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n affinity-4406 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.qNL8cEVMsO ++ mktemp + local LAST_ERR=/tmp/tmp.nib6jeeQuq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qNL8cEVMsO secret/some-name-disabled-users-secret created + cat /tmp/tmp.nib6jeeQuq + rm /tmp/tmp.qNL8cEVMsO /tmp/tmp.nib6jeeQuq + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n affinity-4406 apply -f - + yq w - metadata.name some-name-disabled-pmm-secret ++ mktemp + local LAST_OUT=/tmp/tmp.yUDu1lErGB ++ mktemp + local LAST_ERR=/tmp/tmp.pyERAehQDG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yUDu1lErGB secret/some-name-disabled-pmm-secret created + cat /tmp/tmp.pyERAehQDG + rm /tmp/tmp.yUDu1lErGB /tmp/tmp.pyERAehQDG + return 0 + kubectl_bin -n affinity-4406 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IWeyHAoaKj ++ mktemp + local LAST_ERR=/tmp/tmp.hl9n2y77ha + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + 
kubectl -n affinity-4406 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IWeyHAoaKj deployment.apps/pg-client unchanged + cat /tmp/tmp.hl9n2y77ha + rm /tmp/tmp.IWeyHAoaKj /tmp/tmp.hl9n2y77ha + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name-disabled false false false false disabled false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name-disabled + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=disabled + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name-disabled + yq w - metadata.labels.crunchy-pgha-scope some-name-disabled + yq w - metadata.labels.deployment-name some-name-disabled + yq w - spec.name some-name-disabled + yq w - spec.keepData false + yq w - spec.clustername some-name-disabled + yq w - spec.database some-name-disabled + yq w - metadata.labels.pg-cluster some-name-disabled + yq w - spec.secretsName some-name-disabled-users-secret + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.user some-name + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.keepBackups false + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - metadata.labels.name some-name-disabled + yq w - metadata.name some-name-disabled + case ${podAntiAffinity} in + yq w - spec.backup.affinity.antiAffinityType disabled + yq w /tmp/tmp.40ZIkkeOze/cr.yaml spec.pgPrimary.affinity.antiAffinityType disabled + yq w - spec.pgBouncer.affinity.antiAffinityType disabled + mv /tmp/tmp.40ZIkkeOze/cr.podAffinity.yaml /tmp/tmp.40ZIkkeOze/cr.yaml + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 ++ mktemp + local LAST_OUT=/tmp/tmp.tbtzmAd6ZW ++ mktemp + local LAST_ERR=/tmp/tmp.w4Jk5ovqrg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.40ZIkkeOze/cr.yaml -n affinity-4406 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.tbtzmAd6ZW perconapgcluster.pg.percona.com/some-name-disabled created + cat /tmp/tmp.w4Jk5ovqrg + rm /tmp/tmp.tbtzmAd6ZW /tmp/tmp.w4Jk5ovqrg + return 0 + wait_deployment some-name-disabled-backrest-shared-repo + local name=some-name-disabled-backrest-shared-repo + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name-disabled-backrest-shared-repo.... + wait_deployment some-name-disabled + local name=some-name-disabled + local target_namespace=affinity-4406 + sleep 10 + set +o xtrace some-name-disabled.......... 
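# Note: apply_cluster above rewrites deploy/cr.yaml with yq v3 so that all three affinity sections
# (pgPrimary, pgBouncer, backup) get antiAffinityType=disabled before the perconapgcluster is
# created, then waits for the backrest-shared-repo and primary Deployments. A minimal sketch of
# that customization (simplified; only a subset of the fields set in the trace above is shown):
CLUSTER=some-name-disabled NAMESPACE=affinity-4406
yq w deploy/cr.yaml metadata.name "$CLUSTER" \
    | yq w - metadata.labels.pg-cluster "$CLUSTER" \
    | yq w - spec.name "$CLUSTER" \
    | yq w - spec.clustername "$CLUSTER" \
    | yq w - spec.database "$CLUSTER" \
    | yq w - spec.secretsName "$CLUSTER-users-secret" \
    | yq w - spec.pgPrimary.affinity.antiAffinityType disabled \
    | yq w - spec.pgBouncer.affinity.antiAffinityType disabled \
    | yq w - spec.backup.affinity.antiAffinityType disabled \
    | kubectl -n "$NAMESPACE" apply -f -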
+ [[ false == \g\c\s\+* ]] + wait_job_completion some-name-disabled-stanza-create false + local job=some-name-disabled-stanza-create + local mandatory=false + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' some-name-disabled-stanza-create+ retry=0 ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name-disabled + local job=backrest-backup-some-name-disabled + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' backrest-backup-some-name-disabled+ retry=0 ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
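# Note: each "." printed above and below is one iteration of wait_job_completion, which polls the
# Job once per second (up to 600 attempts) until .status.succeeded becomes 1. A minimal sketch of
# the same polling idea (a simplified reconstruction, not the test-suite helper; the example call
# at the end is illustrative):
wait_job() {
    local job=$1 ns=$2 retry=0
    until [[ $(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
        (( retry++ >= 600 )) && { echo "job $job did not finish" >&2; return 1; }
        sleep 1
        echo -n .
    done
    echo
}
# e.g. wait_job backrest-backup-some-name-disabled affinity-4406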
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-disabled + cluster_name=some-name-disabled + wait_cluster_status some-name-disabled 'pgcluster Initialized' + local cluster_name=some-name-disabled + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-4406 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r08qpI0dt0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XyZBytjdjn ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.r08qpI0dt0 ++ cat /tmp/tmp.XyZBytjdjn ++ rm /tmp/tmp.r08qpI0dt0 /tmp/tmp.XyZBytjdjn ++ return 0 + status_query='kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SHFqz4eu20 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VBCt0JFaMk +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.SHFqz4eu20 +++ cat /tmp/tmp.VBCt0JFaMk +++ rm /tmp/tmp.SHFqz4eu20 /tmp/tmp.VBCt0JFaMk +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NF6wV2zj02 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C60O5CMUfG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.NF6wV2zj02 +++ cat /tmp/tmp.C60O5CMUfG +++ rm /tmp/tmp.NF6wV2zj02 /tmp/tmp.C60O5CMUfG +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ 
get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FuC9wbGWQf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gJKISbDZzY +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.FuC9wbGWQf +++ cat /tmp/tmp.gJKISbDZzY +++ rm /tmp/tmp.FuC9wbGWQf /tmp/tmp.gJKISbDZzY +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.D7xA4bIuj4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pafr3vf4HP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.D7xA4bIuj4 +++ cat /tmp/tmp.pafr3vf4HP +++ rm /tmp/tmp.D7xA4bIuj4 /tmp/tmp.pafr3vf4HP +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ax4Pc26fiG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Wfhum4gWlu +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ax4Pc26fiG +++ cat /tmp/tmp.Wfhum4gWlu +++ rm /tmp/tmp.ax4Pc26fiG /tmp/tmp.Wfhum4gWlu +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.cu8sgYhW3D +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dtzIf4p5zU ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.cu8sgYhW3D ++++ cat /tmp/tmp.dtzIf4p5zU ++++ rm /tmp/tmp.cu8sgYhW3D /tmp/tmp.dtzIf4p5zU ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZLOJYYfwCL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tgvNrdmjmH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ZLOJYYfwCL +++ cat /tmp/tmp.tgvNrdmjmH +++ rm /tmp/tmp.ZLOJYYfwCL 
/tmp/tmp.tgvNrdmjmH +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-disabled.affinity-4406 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-disabled.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yJ2VoxiLzF +++ mktemp ++ local LAST_ERR=/tmp/tmp.n3SeDE0XIZ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.yJ2VoxiLzF ++ cat /tmp/tmp.n3SeDE0XIZ ++ rm /tmp/tmp.yJ2VoxiLzF /tmp/tmp.n3SeDE0XIZ ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.JrWOVHeH4u ++ mktemp + local LAST_ERR=/tmp/tmp.gGvHHyBh0P + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.JrWOVHeH4u + cat /tmp/tmp.gGvHHyBh0P + rm /tmp/tmp.JrWOVHeH4u /tmp/tmp.gGvHHyBh0P + return 0 ++ get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.U8wfBaTAeb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JNXJdVU9q6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.U8wfBaTAeb +++ cat /tmp/tmp.JNXJdVU9q6 +++ rm /tmp/tmp.U8wfBaTAeb /tmp/tmp.JNXJdVU9q6 +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6gKdWISst7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OZLmF5LaSM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 
]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6gKdWISst7 +++ cat /tmp/tmp.OZLmF5LaSM +++ rm /tmp/tmp.6gKdWISst7 /tmp/tmp.OZLmF5LaSM +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.abUVfPKbXY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.af4QJKGcxT +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.abUVfPKbXY +++ cat /tmp/tmp.af4QJKGcxT +++ rm /tmp/tmp.abUVfPKbXY /tmp/tmp.af4QJKGcxT +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ieGRpAuokh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hUKlHWioPV ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.ieGRpAuokh ++++ cat /tmp/tmp.hUKlHWioPV ++++ rm /tmp/tmp.ieGRpAuokh /tmp/tmp.hUKlHWioPV ++++ return 0 +++ kubectl_bin -n affinity-4406 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dxq8PA4CjP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.K1A8W9MurG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.dxq8PA4CjP +++ cat /tmp/tmp.K1A8W9MurG +++ rm /tmp/tmp.dxq8PA4CjP /tmp/tmp.K1A8W9MurG +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-disabled.affinity-4406 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name-disabled.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.26gecQZHDf +++ mktemp ++ local LAST_ERR=/tmp/tmp.LTapKSNBNp ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.26gecQZHDf ++ cat /tmp/tmp.LTapKSNBNp ++ rm /tmp/tmp.26gecQZHDf /tmp/tmp.LTapKSNBNp ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.vFXO9dASjH ++ mktemp + local LAST_ERR=/tmp/tmp.CIz1suIFW2 + 
local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.vFXO9dASjH + cat /tmp/tmp.CIz1suIFW2 + rm /tmp/tmp.vFXO9dASjH /tmp/tmp.CIz1suIFW2 + return 0 ++ get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-4406 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6GuPysqFOH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UQIhhezyXD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6GuPysqFOH +++ cat /tmp/tmp.UQIhhezyXD +++ rm /tmp/tmp.6GuPysqFOH /tmp/tmp.UQIhhezyXD +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.i2SVsWdgwq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1Uz346Kh5J +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.i2SVsWdgwq +++ cat /tmp/tmp.1Uz346Kh5J +++ rm /tmp/tmp.i2SVsWdgwq /tmp/tmp.1Uz346Kh5J +++ return 0 +++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mnBbuSAOEA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qVl0ABSn1c +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.mnBbuSAOEA +++ cat /tmp/tmp.qVl0ABSn1c +++ rm /tmp/tmp.mnBbuSAOEA /tmp/tmp.qVl0ABSn1c +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EgV9SbNc2m +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NdPvFgoHjs ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n affinity-4406 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.EgV9SbNc2m ++++ cat /tmp/tmp.NdPvFgoHjs ++++ rm /tmp/tmp.EgV9SbNc2m /tmp/tmp.NdPvFgoHjs ++++ return 0 +++ kubectl_bin -n affinity-4406 get 
secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IpiLAvIOKq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.95inYuxpyt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n affinity-4406 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.IpiLAvIOKq +++ cat /tmp/tmp.95inYuxpyt +++ rm /tmp/tmp.IpiLAvIOKq /tmp/tmp.95inYuxpyt +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-disabled.affinity-4406 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name-disabled.affinity-4406 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uf5nT7tMoZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.nh14MAXffc ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n affinity-4406 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Uf5nT7tMoZ ++ cat /tmp/tmp.nh14MAXffc ++ rm /tmp/tmp.Uf5nT7tMoZ /tmp/tmp.nh14MAXffc ++ return 0 + local client_container=pg-client-76df9df876-hgk5m + kubectl_bin exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.D2lU6f9Ngc ++ mktemp + local LAST_ERR=/tmp/tmp.4MiXSCS2EO + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-hgk5m -n affinity-4406 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-4406.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.D2lU6f9Ngc + cat /tmp/tmp.4MiXSCS2EO + rm /tmp/tmp.D2lU6f9Ngc /tmp/tmp.4MiXSCS2EO + return 0 + sleep 10 + compare_kubectl deployment/some-name-disabled + local resource=deployment/some-name-disabled + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-disabled.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name-disabled.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-disabled + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - metadata.deletionTimestamp ++ mktemp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + local LAST_OUT=/tmp/tmp.XmNJY7CPqc + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 
'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' ++ mktemp + yq d - '**.image' + local LAST_ERR=/tmp/tmp.3X80h6Z9r9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + yq d - '**.clusterIP' + yq d - '**.creationTimestamp' + yq d - '**.volumeName' + yq d - metadata.selfLink + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name-disabled + yq d - '**.clusterIPs' + yq d - spec.nodeName + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - status + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - '**."percona.com/*"' + yq d - '**.healthCheckNodePort' + yq d - '**.(name==suffix)' + yq d - metadata.resourceVersion + yq d - '**.nodePort' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.ipFamilies' + yq d - '**.controller-uid' + yq d - '**.(name==PODNAME)' + yq d - '**.ipFamilyPolicy' + yq d - '**.preemptionPolicy' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.internalTrafficPolicy' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.uid' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XmNJY7CPqc + cat /tmp/tmp.3X80h6Z9r9 + rm /tmp/tmp.XmNJY7CPqc /tmp/tmp.3X80h6Z9r9 + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name-disabled.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-disabled.yml /tmp/tmp.40ZIkkeOze/deployment_some-name-disabled.yml + compare_kubectl deployment/some-name-disabled-backrest-shared-repo + local resource=deployment/some-name-disabled-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-disabled-backrest-shared-repo.yml + local new_result=/tmp/tmp.40ZIkkeOze/deployment_some-name-disabled-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + kubectl_bin get -o yaml deployment/some-name-disabled-backrest-shared-repo + yq d - '**.uid' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.resourceVersion + yq d - '**.namespace' ++ mktemp + yq d - metadata.deletionTimestamp + yq d - metadata.selfLink + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**.procMount' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.finalizers' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.nodePort' + yq d - '**.volumeName' + local LAST_OUT=/tmp/tmp.EUUAJMGLcd + yq d - '**.imagePullSecrets' + yq d - '**.dataSource' + yq d - '**.storageClassName' + yq d - 
'**.enableServiceLinks' + yq d - status + yq d - '**.healthCheckNodePort' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.creationTimestamp' + yq d - '**.(name==suffix)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.image' + yq d - '**.(name==NAMESPACE)' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==COMMAND_OPTS)' + yq d - spec.nodeName + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**."percona.com/*"' + yq d - '**.(name==PODNAME)' + yq d - '**.controller-uid' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.ipFamilies' + yq d - '**.ipFamilyPolicy' + yq d - '**.internalTrafficPolicy' + yq d - '**.allocateLoadBalancerNodePorts' ++ mktemp + local LAST_ERR=/tmp/tmp.iIh4MMHq03 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name-disabled-backrest-shared-repo + yq d - 'metadata.annotations."kubernetes.io/psp"' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.EUUAJMGLcd + cat /tmp/tmp.iIh4MMHq03 + rm /tmp/tmp.EUUAJMGLcd /tmp/tmp.iIh4MMHq03 + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/deployment_some-name-disabled-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/deployment_some-name-disabled-backrest-shared-repo.yml /tmp/tmp.40ZIkkeOze/deployment_some-name-disabled-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name-disabled + local resource=job/backrest-backup-some-name-disabled + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name-disabled.yml + local new_result=/tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml job/backrest-backup-some-name-disabled + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.uid' + yq d - '**.namespace' + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.procMount' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.dataSource' + yq d - '**.healthCheckNodePort' + yq d - '**.enableServiceLinks' + yq d - '**.nodePort' + yq d - '**.imagePullSecrets' + yq d - status + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.volumeName' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilyPolicy' + yq d - '**.(name==PODNAME)' + yq d - '**."percona.com/*"' + yq d - '**.(name==suffix)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.finalizers' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.controller-uid' + yq d - '**.image' + yq d - '**.storageClassName' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.nodeName 
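# Note: compare_kubectl (above and below) fetches the live object as YAML, strips volatile,
# server-populated fields with a long chain of yq v3 deletions, and diffs the result against the
# expected file under e2e-tests/affinity/compare/. A minimal sketch of that normalization
# (simplified; only a few of the yq filters from the trace are shown, and the output path is
# illustrative):
RESOURCE=job/backrest-backup-some-name-disabled
EXPECTED=e2e-tests/affinity/compare/job_backrest-backup-some-name-disabled.yml
kubectl -n affinity-4406 get -o yaml "$RESOURCE" \
    | yq d - metadata.managedFields \
    | yq d - metadata.resourceVersion \
    | yq d - metadata.selfLink \
    | yq d - '**.uid' \
    | yq d - '**.creationTimestamp' \
    | yq d - '**.namespace' \
    | yq d - status \
    > /tmp/actual.yml
diff -u "$EXPECTED" /tmp/actual.yml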
+ yq d - '**.ipFamilies' + yq d - '**.internalTrafficPolicy' + yq d - spec.volumeMode + yq d - '**.creationTimestamp' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - metadata.selfLink + yq d - metadata.resourceVersion ++ mktemp + local LAST_OUT=/tmp/tmp.j7QfcrsFfp ++ mktemp + local LAST_ERR=/tmp/tmp.eecZG0IE7J + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name-disabled + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.j7QfcrsFfp + cat /tmp/tmp.eecZG0IE7J + rm /tmp/tmp.j7QfcrsFfp /tmp/tmp.eecZG0IE7J + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml kind ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml metadata.annotations + [[ x{} == \x ]] ++ yq r /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml metadata.annotations + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml metadata.generation + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml spec.completionMode + yq d -i /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/affinity/compare/job_backrest-backup-some-name-disabled.yml /tmp/tmp.40ZIkkeOze/job_backrest-backup-some-name-disabled.yml + create_backup some-name-disabled disabled + local cluster=some-name-disabled + local bckp_prefix=disabled + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name-disabled + yq w - spec.name disabled-some-name-disabled + yq w - spec.parameters.job-name disabled-some-name-disabled + kubectl_bin apply -f - + yq w - spec.parameters.pg-cluster some-name-disabled + yq w - spec.namespace affinity-4406 + yq w - metadata.name disabled-some-name-disabled + yq w --style=single -- - spec.parameters.backrest-opts --type=full ++ mktemp ++ kubectl_bin get pods --selector=name=some-name-disabled-backrest-shared-repo,pg-cluster=some-name-disabled -o 'jsonpath={.items[].metadata.name}' + local LAST_OUT=/tmp/tmp.6dAvumKBKT +++ mktemp ++ mktemp ++ local LAST_OUT=/tmp/tmp.iSXWGGdGJH + local LAST_ERR=/tmp/tmp.uamqzy3Mf3 + local exit_status=0 + local timeout=4 +++ mktemp + set +o errexit ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.Q5kJvftBzS ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=some-name-disabled-backrest-shared-repo,pg-cluster=some-name-disabled -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.iSXWGGdGJH ++ cat /tmp/tmp.Q5kJvftBzS ++ rm /tmp/tmp.iSXWGGdGJH /tmp/tmp.Q5kJvftBzS ++ return 0 + yq w - spec.parameters.podname some-name-disabled-backrest-shared-repo-6d97b67686-xfkdf + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.6dAvumKBKT pgtask.pg.percona.com/disabled-some-name-disabled created + cat /tmp/tmp.uamqzy3Mf3 + rm /tmp/tmp.6dAvumKBKT /tmp/tmp.uamqzy3Mf3 + return 0 + sleep 10 + wait_job_completion disabled-some-name-disabled + 
local job=disabled-some-name-disabled + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' disabled-some-name-disabled+ retry=0 ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
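# Note: the earlier write-data steps (run_psql above) execute psql inside the pg-client pod against
# the cluster Service. A minimal sketch of that pattern (simplified: the statement and the password
# here are illustrative, and psql -c is used in place of the helper's printf pipeline):
NAMESPACE=affinity-4406
URI=postgres://postgres:postgres_password@some-name-disabled.${NAMESPACE}.svc.cluster.local/myapp
CLIENT=$(kubectl -n "$NAMESPACE" get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')
kubectl -n "$NAMESPACE" exec "$CLIENT" -- \
    psql -v ON_ERROR_STOP=1 -t -q "$URI" -c 'INSERT INTO myApp (id) VALUES (100500)'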
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + destroy affinity-4406 + grep -v level=info + grep -v level=debug ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + grep -v 'Getting tasks for pod' +++ mktemp + grep -v 'Getting pods from source' + sort -u + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + tee /tmp/tmp.40ZIkkeOze/operator.log + grep -v 'get backup status: Job.batch' + grep -v 'the object has been modified' ++ local LAST_OUT=/tmp/tmp.C7CsGbMI6v +++ mktemp ++ local LAST_ERR=/tmp/tmp.4VvQkyP6Mo ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.C7CsGbMI6v ++ cat /tmp/tmp.4VvQkyP6Mo ++ rm /tmp/tmp.C7CsGbMI6v /tmp/tmp.4VvQkyP6Mo ++ return 0 + kubectl_bin logs postgres-operator-58c86b9b64-qlmzd -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.QoVVRsM4bU ++ mktemp + local LAST_ERR=/tmp/tmp.q1NBTuFduz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl logs postgres-operator-58c86b9b64-qlmzd -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.QoVVRsM4bU + cat /tmp/tmp.q1NBTuFduz + rm /tmp/tmp.QoVVRsM4bU /tmp/tmp.q1NBTuFduz + return 0 ] { } }, ] } }, { } }, ] { } }, { } ] } { } }, ] } ] { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ], { } }, }], { } }, }], } ], } }, } }, } }, { } }, }, 2024/04/24 19:05:31 INF 1 (localhost:4150) connecting to nsqd 2024/04/24 19:06:20 INF 2 (localhost:4150) connecting to nsqd 2024/04/24 19:06:20 INF 3 (localhost:4150) connecting to nsqd 2024/04/24 19:06:27 INF 4 (localhost:4150) connecting to nsqd 2024/04/24 19:06:55 INF 5 (localhost:4150) connecting to nsqd 2024/04/24 19:06:56 INF 6 (localhost:4150) connecting to nsqd 2024/04/24 19:06:56 INF 7 (localhost:4150) connecting to nsqd 2024/04/24 19:08:12 INF 8 (localhost:4150) connecting to nsqd 2024/04/24 19:08:41 INF 9 (localhost:4150) connecting to nsqd 2024/04/24 19:08:43 INF 10 (localhost:4150) connecting to nsqd 2024/04/24 19:09:03 INF 11 (localhost:4150) connecting to nsqd 2024/04/24 19:09:08 INF 12 (localhost:4150) connecting to nsqd 2024/04/24 19:09:18 INF 13 (localhost:4150) connecting to nsqd 2024/04/24 19:10:04 INF 14 (localhost:4150) connecting to nsqd 2024/04/24 19:10:05 INF 15 (localhost:4150) connecting to nsqd 2024/04/24 19:10:12 INF 16 (localhost:4150) connecting to nsqd 2024/04/24 19:10:39 INF 17 (localhost:4150) connecting to nsqd 2024/04/24 19:10:39 INF 18 (localhost:4150) connecting to nsqd 2024/04/24 19:10:39 INF 19 (localhost:4150) connecting to nsqd 2024/04/24 19:12:06 INF 20 (localhost:4150) connecting to nsqd 2024/04/24 19:12:26 INF 21 (localhost:4150) connecting to nsqd 2024/04/24 19:12:27 INF 22 (localhost:4150) connecting to nsqd 2024/04/24 19:12:45 INF 23 (localhost:4150) connecting to nsqd 2024/04/24 19:12:50 INF 24 (localhost:4150) connecting to nsqd 2024/04/24 19:13:02 INF 25 (localhost:4150) connecting to nsqd 2024/04/24 
19:13:55 INF 26 (localhost:4150) connecting to nsqd 2024/04/24 19:13:56 INF 27 (localhost:4150) connecting to nsqd 2024/04/24 19:14:02 INF 28 (localhost:4150) connecting to nsqd 2024/04/24 19:14:29 INF 29 (localhost:4150) connecting to nsqd 2024/04/24 19:14:29 INF 30 (localhost:4150) connecting to nsqd 2024/04/24 19:14:30 INF 31 (localhost:4150) connecting to nsqd 2024/04/24 19:15:48 INF 32 (localhost:4150) connecting to nsqd 2024/04/24 19:16:11 INF 33 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { }, "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "some-name-disabled-pgbr-repo" "claimName": "some-name-pgbr-repo" "claimName": "some-name-required-pgbr-repo" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "200m", "cpu": "500m", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgha-scope": "some-name-disabled","pgo-version": "1.6.0","name": "some-name-disabled","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled","pgouser": "admin" "crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin" "defaultMode": 288 "defaultMode": 288, "defaultMode": 288, "defaultMode": 420, "deployment-name": "some-name" "deployment-name": "some-name-disabled" "deployment-name": "some-name-disabled-repl1" "deployment-name": "some-name-disabled-repl2" "deployment-name": "some-name-repl1" "deployment-name": "some-name-repl2" "deployment-name": "some-name-required" "deployment-name": "some-name-required","pgouser": "admin","crunchy-pgha-scope": "some-name-required","pgo-version": "1.6.0","name": "some-name-required","pg-cluster": "some-name-required" "deployment-name": "some-name-required-repl1" "deployment-name": "some-name-required-repl2" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0424 19:04:55.729534 1 shared_informer.go:240] Waiting for caches to sync for namespace I0424 19:04:55.830141 1 shared_informer.go:247] Caches are synced for namespace I0424 19:04:55.918911 1 shared_informer.go:240] Waiting for caches to sync for affinity-4406 I0424 19:04:56.019826 1 shared_informer.go:247] Caches are synced for affinity-4406 "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "image": "perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-rmdata", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "items": [ "key": "config", "key": 
"config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "tls.crt", "key": "tls.key", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "256Mi" "memory": "48Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgconf/tls", "mountPath": "/pgconf/tls/pgbouncer", "mountPath": "/pgconf/tls-replication", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "name": "backrest", "name": "backrest-backup-some-name", "name": "backrest-backup-some-name-disabled", "name": "backrest-backup-some-name-required", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "cluster1-replication-ssl-keypair", "name": "cluster1-ssl-ca" "name": "cluster1-ssl-ca" "name": "cluster1-ssl-keypair" "name": "cluster1-ssl-keypair" "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "disabled-some-name-disabled", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "IS_BACKUP", "name": "IS_REPLICA", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "PG_CLUSTER", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": 
"PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_SCOPE", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "preferred-some-name", "name": "primary-volume" "name": "primary-volume", "name": "REMOVE_BACKUP", "name": "REMOVE_DATA", "name": "REPLICA_NAME", "name": "report", "name": "required-some-name-required", "name": "rmdata", "name": "root-volume" "name": "root-volume", "name": "some-name", "name": "some-name", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo" "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-disabled", "name": "some-name-disabled", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo" "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer-cm" "name": "some-name-disabled-pgbouncer-secret", "name": "some-name-disabled-pgbouncer-secret", "name": "some-name-disabled-pgbr-repo", "name": "some-name-disabled-pgha-config", "name": "some-name-disabled-pgha-config", "name": "some-name-disabled-repl1", "name": "some-name-disabled-repl1", "name": "some-name-disabled-repl2", "name": "some-name-disabled-repl2", "name": "some-name-disabled-stanza-create", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer-cm" "name": "some-name-pgbouncer-secret", "name": "some-name-pgbouncer-secret", "name": "some-name-pgbr-repo", "name": "some-name-pgha-config", "name": "some-name-pgha-config", "name": "some-name-repl1", "name": "some-name-repl1", "name": "some-name-repl2", "name": "some-name-repl2", "name": "some-name-required", "name": "some-name-required", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo" "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer-cm" "name": "some-name-required-pgbouncer-secret", "name": "some-name-required-pgbouncer-secret", "name": "some-name-required-pgbr-repo", "name": "some-name-required","pg-cluster": "some-name-required","deployment-name": "some-name-required","pgouser": "admin","crunchy-pgha-scope": "some-name-required","pgo-version": "1.6.0" "name": "some-name-required-pgha-config", "name": "some-name-required-pgha-config", "name": "some-name-required-repl1", "name": "some-name-required-repl1", "name": "some-name-required-repl2", "name": "some-name-required-repl2", "name": "some-name-required-rmdata-uyhk", "name": "some-name-required-rmdata-uyhk", "name": "some-name-required-stanza-create", "name": "some-name-rmdata-yjkm", "name": 
"some-name-rmdata-yjkm", "name": "some-name-stanza-create", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tls-pgbouncer" "name": "tls-pgbouncer", "name": "tls-replication" "name": "tls-replication", "name": "tls-server" "name": "tls-server", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "path": "tls-replication.crt" "path": "tls-replication.key" "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"some-name"} "persistentVolumeClaim":{"claimName":"some-name-disabled"} "persistentVolumeClaim":{"claimName":"some-name-disabled-repl1"} "persistentVolumeClaim":{"claimName":"some-name-disabled-repl2"} "persistentVolumeClaim":{"claimName":"some-name-repl1"} "persistentVolumeClaim":{"claimName":"some-name-repl2"} "persistentVolumeClaim":{"claimName":"some-name-required"} "persistentVolumeClaim":{"claimName":"some-name-required-repl1"} "persistentVolumeClaim":{"claimName":"some-name-required-repl2"} "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl2","pgo-version": "1.6.0","name": "some-name-disabled-replica" "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name","deployment-name": "some-name" "pgo-version": "1.6.0","name": "some-name-disabled","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled","pgouser": "admin","crunchy-pgha-scope": "some-name-disabled" "pgo-version": "1.6.0","name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl1" "pgo-version": "1.6.0","name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl1" "pgo-version": "1.6.0","name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl2" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl1" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl1" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl2" "pgo-version": "1.6.0","name": 
"some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl2" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl1" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl1" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl2" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl2" "pg-pod-anti-affinity": "disabled", "pg-pod-anti-affinity": "disabled", "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "required", "pg-pod-anti-affinity": "required", "pgrmdata": "true", "pgrmdata": "true", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 2022, "ports": [{ "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "replicas": 3, "requests": { "requests": { "require" "required", "requiredDuringSchedulingIgnoredDuringExecution": [ "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-disabled-backrest-repo-config" "secretName": "some-name-disabled-backrest-repo-config", "secretName": "some-name-disabled-backrest-repo-config" "secretName": "some-name-disabled-backrest-repo-config", "secretName": "some-name-disabled-postgres-secret" "secretName": "some-name-disabled-primaryuser-secret" "secretName": "some-name-disabled-some-name-secret" "secretName": "some-name-postgres-secret" "secretName": "some-name-primaryuser-secret" "secretName": "some-name-required-backrest-repo-config" "secretName": "some-name-required-backrest-repo-config", "secretName": "some-name-required-backrest-repo-config" "secretName": "some-name-required-backrest-repo-config", "secretName": "some-name-required-postgres-secret" "secretName": "some-name-required-primaryuser-secret" "secretName": "some-name-required-some-name-secret" "secretName": "some-name-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "serviceAccountName": "pgo-target", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-disabled-backrest-shared-repo", "service-name": "some-name-disabled-backrest-shared-repo", "service-name": 
"some-name-disabled-pgbouncer", "service-name": "some-name-disabled-pgbouncer", "service-name": "some-name-disabled-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-required-backrest-shared-repo", "service-name": "some-name-required-backrest-shared-repo", "service-name": "some-name-required-pgbouncer", "service-name": "some-name-required-pgbouncer", "service-name": "some-name-required-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "2Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "some-name" "some-name-required" "sources": [ "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1Gi" "strategy": { "targetPort": 2022, "template": { time="2024-04-24T19:04:54Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0 time="2024-04-24T19:05:35Z" level=error msg="get deployment some-name-repl1: deployments.apps \"some-name-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:05:40Z" level=error msg="get deployment some-name-repl2: deployments.apps \"some-name-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:06:20Z" level=error msg="Error completing workflow some-name-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-24T19:06:20Z" level=error msg="pgtasks.pg.percona.com \"some-name-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-24T19:06:56Z" level=error msg="no pod found for \"some-name\"" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-24T19:06:56Z" level=warning msg="crv1 pgreplica some-name-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:06:56Z" level=warning msg="crv1 pgreplica some-name-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:08:43Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:44Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:46Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:47Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:48Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:51Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:55Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:56Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:08:59Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:09:01Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:09:03Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:09:08Z" level=error msg="jobs.batch \"some-name-rmdata-yjkm\" not found" 
func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-24T19:09:22Z" level=error msg="get deployment some-name-required-repl1: deployments.apps \"some-name-required-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:09:27Z" level=error msg="get deployment some-name-required-repl2: deployments.apps \"some-name-required-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:10:04Z" level=error msg="Error completing workflow some-name-required-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-24T19:10:04Z" level=error msg="pgtasks.pg.percona.com \"some-name-required-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-24T19:10:40Z" level=warning msg="crv1 pgreplica some-name-required-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:10:40Z" level=warning msg="crv1 pgreplica some-name-required-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:10:41Z" level=warning msg="crv1 pgreplica some-name-required-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:12:27Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:29Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:30Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" 
file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:31Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:34Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:39Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:40Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:43Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:44Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:45Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-24T19:12:50Z" level=error msg="jobs.batch \"some-name-required-rmdata-uyhk\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-24T19:13:06Z" level=error msg="get deployment some-name-disabled-repl1: deployments.apps \"some-name-disabled-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:13:11Z" level=error msg="get deployment some-name-disabled-repl2: deployments.apps \"some-name-disabled-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-24T19:13:55Z" level=error 
msg="Error completing workflow some-name-disabled-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-24T19:13:55Z" level=error msg="pgtasks.pg.percona.com \"some-name-disabled-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-24T19:14:30Z" level=warning msg="crv1 pgreplica some-name-disabled-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-24T19:14:30Z" level=warning msg="crv1 pgreplica some-name-disabled-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-disabled-backrest-shared-repo" "value": "/backrestrepo/some-name-disabled-backrest-shared-repo" "value": "/backrestrepo/some-name-required-backrest-shared-repo" "value": "/backrestrepo/some-name-required-backrest-shared-repo" "value": "backup" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.109.90.40 --db-path=/pgdata/some-name" "value": " --db-host=10.109.90.44 --db-path=/pgdata/some-name-required" "value": " --db-host=10.109.90.48 --db-path=/pgdata/some-name-disabled" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/some-name" "value": "/pgdata/some-name" "value": "/pgdata/some-name-disabled" "value": "/pgdata/some-name-disabled" "value": "/pgdata/some-name-disabled-repl1" "value": "/pgdata/some-name-disabled-repl1" "value": "/pgdata/some-name-disabled-repl2" "value": "/pgdata/some-name-disabled-repl2" "value": "/pgdata/some-name-repl1" "value": "/pgdata/some-name-repl1" "value": "/pgdata/some-name-repl2" "value": "/pgdata/some-name-repl2" "value": "/pgdata/some-name-required" "value": "/pgdata/some-name-required" "value": "/pgdata/some-name-required-repl1" "value": "/pgdata/some-name-required-repl1" "value": "/pgdata/some-name-required-repl2" "value": "/pgdata/some-name-required-repl2" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "some-name" "value": "some-name-backrest-shared-repo" "value": "some-name-backrest-shared-repo-6f8f947dbc-gvhsr" "value": "some-name-disabled" "value": "some-name-disabled-backrest-shared-repo" "value": "some-name-disabled-backrest-shared-repo-6d97b67686-xfkdf" "value": "some-name-required" "value": "some-name-required-backrest-shared-repo" "value": "some-name-required-backrest-shared-repo-8558c9b6fc-l79mk" "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" 
"value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.109.90.40 --db-path=/pgdata/some-name" "value": "--type=full --db-host=10.109.90.44 --db-path=/pgdata/some-name-required" "value": "--type=full --db-host=10.109.90.48 --db-path=/pgdata/some-name-disabled" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.40ZIkkeOze/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YEMZhcGvv1 ++ mktemp + local LAST_ERR=/tmp/tmp.EOns93Jin1 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.40ZIkkeOze/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.40ZIkkeOze/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.40ZIkkeOze/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.YEMZhcGvv1 + cat /tmp/tmp.EOns93Jin1 Error from server (NotFound): error when deleting "/tmp/tmp.40ZIkkeOze/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.40ZIkkeOze/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.40ZIkkeOze/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.40ZIkkeOze/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.40ZIkkeOze/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.YEMZhcGvv1 /tmp/tmp.EOns93Jin1 + return 1 + true + yq w -d4 /tmp/tmp.40ZIkkeOze/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.40ZIkkeOze/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ajwCvsEUXj ++ mktemp + local LAST_ERR=/tmp/tmp.ESC3oGDv6K + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.40ZIkkeOze/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ajwCvsEUXj serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.ESC3oGDv6K + rm /tmp/tmp.ajwCvsEUXj /tmp/tmp.ESC3oGDv6K + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=affinity-4406 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n 
affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-4406 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.40ZIkkeOze/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.zrNdfSh3IJ ++ mktemp + local LAST_ERR=/tmp/tmp.CqA4YgUmAW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.40ZIkkeOze/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.zrNdfSh3IJ serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.CqA4YgUmAW + rm /tmp/tmp.zrNdfSh3IJ /tmp/tmp.CqA4YgUmAW + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + xargs kubectl delete ns --grace-period=0 --force=true ++ mktemp + local LAST_OUT=/tmp/tmp.Qc4nAA1zGd ++ mktemp + local LAST_ERR=/tmp/tmp.PH2vCFB3Fr + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Qc4nAA1zGd + cat /tmp/tmp.PH2vCFB3Fr + rm /tmp/tmp.Qc4nAA1zGd /tmp/tmp.PH2vCFB3Fr + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
namespace "affinity-4406" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.IuiNESCjiS ++ mktemp + local LAST_ERR=/tmp/tmp.ovaIsz5e00 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.IuiNESCjiS + cat /tmp/tmp.ovaIsz5e00 Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.IuiNESCjiS /tmp/tmp.ovaIsz5e00 + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.SJPPeIHKXE ++ mktemp + local LAST_ERR=/tmp/tmp.sge7wdR4ep + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.SJPPeIHKXE + cat /tmp/tmp.sge7wdR4ep Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.SJPPeIHKXE /tmp/tmp.sge7wdR4ep + return 1 + true + rm -rf /tmp/tmp.40ZIkkeOze