++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/affinity.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/affinity.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + create_namespace affinity-13957 + local namespace=affinity-13957 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.VAIuIFPSnO ++ mktemp + local LAST_OUT=/tmp/tmp.FZ7OTT7Mje ++ mktemp + local LAST_ERR=/tmp/tmp.cLYHkRS84w + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + local LAST_ERR=/tmp/tmp.9lwl3os5vY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get ns + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.VAIuIFPSnO + cat /tmp/tmp.cLYHkRS84w + rm /tmp/tmp.VAIuIFPSnO /tmp/tmp.cLYHkRS84w + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.FZ7OTT7Mje + cat /tmp/tmp.9lwl3os5vY Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.FZ7OTT7Mje /tmp/tmp.9lwl3os5vY + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.Fa5CU00bMZ ++ mktemp + local LAST_ERR=/tmp/tmp.SHW54Up8Zh + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + 
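Throughout this log, every kubectl call goes through a kubectl_bin wrapper: it captures stdout/stderr into mktemp files (LAST_OUT/LAST_ERR), retries up to three times with growing sleeps, then replays the captured output and returns the last exit status. A minimal sketch reconstructed only from the commands visible in this trace (the real helper may differ in detail):

kubectl_bin() {
    local LAST_OUT LAST_ERR
    local exit_status=0
    local timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        # per-run sandbox kubeconfig (/tmp/jen-pg-721-a67b3475b-2-sandbox in this job)
        KUBECONFIG="${KUBECONFIG}" kubectl "$@" 1>"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        if [[ ${exit_status} != 0 ]]; then
            sleep $((timeout * i))   # 0s, 4s, 8s between attempts
        else
            break
        fi
    done
    set -o errexit
    cat "${LAST_OUT}"
    cat "${LAST_ERR}"
    rm "${LAST_OUT}" "${LAST_ERR}"
    return ${exit_status}
}

Callers that tolerate failure append "|| true" or "|| :", which is why the NotFound errors in this cleanup phase show up as "+ true" / "+ :" in the trace instead of aborting the run.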
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.Fa5CU00bMZ + cat /tmp/tmp.SHW54Up8Zh Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.Fa5CU00bMZ /tmp/tmp.SHW54Up8Zh + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.7mDly63jab ++ mktemp + local LAST_ERR=/tmp/tmp.h9uXPad9Rb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace affinity-13957 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace affinity-13957 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace affinity-13957 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.7mDly63jab + cat /tmp/tmp.h9uXPad9Rb Error from server (NotFound): namespaces "affinity-13957" not found + rm /tmp/tmp.7mDly63jab /tmp/tmp.h9uXPad9Rb + return 1 + : + wait_for_delete namespace/affinity-13957 + local res=namespace/affinity-13957 + set +o xtrace namespace/affinity-13957 - Error from server (NotFound): namespaces "affinity-13957" not found + kubectl_bin create namespace affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.ZI52J1oAWP ++ mktemp + local LAST_ERR=/tmp/tmp.Nu6HAvU3Xp + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl create namespace affinity-13957 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZI52J1oAWP namespace/affinity-13957 created + cat /tmp/tmp.Nu6HAvU3Xp + rm /tmp/tmp.ZI52J1oAWP /tmp/tmp.Nu6HAvU3Xp + 
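The namespace setup above follows a clean-then-create pattern: leftover test namespaces and cluster-scoped RBAC/webhook objects from earlier runs are deleted (NotFound errors are expected and ignored), then the target namespace is removed, awaited, and recreated. Roughly, under the names seen in this trace (cleanup_rbac is inlined here for brevity):

create_namespace() {
    local namespace=$1
    # drop leftover namespaces from previous test runs
    kubectl_bin get ns \
        | egrep -v '^kube-|^default|Terminating|openshift|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns || true
    # cluster-scoped leftovers from the operator and chaos-mesh
    kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr \
        chaos-mesh-chaos-controller-manager-cluster-level || true
    kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr \
        chaos-mesh-chaos-controller-manager-target-namespace \
        chaos-mesh-chaos-controller-manager-cluster-level || true
    kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation \
        ValidatingWebhookConfiguration/chaos-mesh-validation \
        ValidatingWebhookConfiguration/validate-auth || true
    # recreate the test namespace from scratch
    kubectl_bin delete namespace "${namespace}" || :
    wait_for_delete "namespace/${namespace}"
    kubectl_bin create namespace "${namespace}"
}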
return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nEUdr9FTF9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j9f0T7lGLp ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.nEUdr9FTF9 ++ cat /tmp/tmp.j9f0T7lGLp ++ rm /tmp/tmp.nEUdr9FTF9 /tmp/tmp.j9f0T7lGLp ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox --namespace=affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.usVjgRwKI0 ++ mktemp + local LAST_ERR=/tmp/tmp.qTYAlUxHzx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox --namespace=affinity-13957 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.usVjgRwKI0 Context "gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox" modified. + cat /tmp/tmp.qTYAlUxHzx + rm /tmp/tmp.usVjgRwKI0 /tmp/tmp.qTYAlUxHzx + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=affinity-13957 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml disabled install affinity-13957 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=affinity-13957 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "affinity-13957"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^namespace: .*#namespace: "affinity-13957"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b ++ cut -d: -f1 + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-721-a67b3475b"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml metadata.namespace affinity-13957 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-deployer + yq w -d3 - 'subjects[0].namespace' affinity-13957 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.IIbhFo6FJI/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" 
create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "affinity-13957" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-721-a67b3475b" pgo_operator_namespace: "affinity-13957"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.IIbhFo6FJI/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.RBbhE6y2mJ ++ mktemp + local LAST_ERR=/tmp/tmp.AXbEAnq0Sv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.IIbhFo6FJI/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.RBbhE6y2mJ serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.AXbEAnq0Sv + rm /tmp/tmp.RBbhE6y2mJ /tmp/tmp.AXbEAnq0Sv + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
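deploy_operator does not apply deploy/operator.yaml verbatim: prepare_operator_yaml first rewrites the pgo-deployer ConfigMap values and the namespaces, then the patched manifest is applied and the pgo-deploy Job is awaited. A condensed sketch of the yq/sed pipeline traced above (operator_manifest, namespace and tmp_dir are placeholders for the paths shown in the log; only part of the substitutions is reproduced):

# 1. patch the deployer's values.yaml (stored in the ConfigMap document of operator.yaml)
yq r -d2 "${operator_manifest}" 'data[values.yaml]' \
    | sed -e "s#pgo_operator_namespace: .*#pgo_operator_namespace: \"${namespace}\"#g" \
          -e "s#namespace_mode: .*#namespace_mode: \"disabled\"#g" \
          -e "s#^namespace: .*#namespace: \"${namespace}\"#g" \
          -e "s#^disable_telemetry: .*#disable_telemetry: \"false\"#g" \
          -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-721-a67b3475b"#g' \
          -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' \
    > "${tmp_dir}/operator.ini"      # plus an echoed 'crunchy_debug: "true"' line

# 2. patch the manifest itself and embed the new values.yaml
yq w '-d*' "${operator_manifest}" metadata.namespace "${namespace}" \
    | yq w -d3 - 'subjects[0].namespace' "${namespace}" \
    | yq w -d4 - 'spec.template.spec.containers[0].image' \
          perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-deployer \
    | yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install \
    | yq w -d2 - 'data[values.yaml]' "$(cat "${tmp_dir}/operator.ini")" \
    > "${tmp_dir}/operator.yaml"

# 3. apply it and wait for the deployer Job
kubectl_bin apply -f "${tmp_dir}/operator.yaml"
wait_job_completion pgo-deploy true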
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.CqZwQDBZvO ++ mktemp + local LAST_ERR=/tmp/tmp.SLTQHfQqtN + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.CqZwQDBZvO serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.SLTQHfQqtN + rm /tmp/tmp.CqZwQDBZvO /tmp/tmp.SLTQHfQqtN + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q3G3kU8cu7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7GRqnv0UE2 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.q3G3kU8cu7 ++ cat /tmp/tmp.7GRqnv0UE2 ++ rm /tmp/tmp.q3G3kU8cu7 /tmp/tmp.7GRqnv0UE2 ++ return 0 + wait_pod postgres-operator-6df7cc4897-7f5nv + local pod=postgres-operator-6df7cc4897-7f5nv + local ns= + set +o xtrace postgres-operator-6df7cc4897-7f5nvtrue + [[ -n '' ]] + cluster=some-name + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local cluster=some-name + local 
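The long run of dots above is wait_job_completion polling the pgo-deploy Job once per second until .status.succeeded becomes 1 (the deployer manifest is deleted again right after). A sketch of the loop as it appears in this trace, with the 120-try wait for the Job object to exist and the 600-try wait for completion; the mandatory flag decides whether a vanished Job is fatal:

wait_job_completion() {
    local job=$1
    local mandatory=${2:-true}
    local ns=${3:-$namespace}
    local retry=0
    # phase 1: wait (up to ~120s) for the Job object itself to show up
    until kubectl -n "${ns}" get "job/${job}" -o 'jsonpath={.metadata.name}' 2>/dev/null; do
        sleep 1
        echo -n .
        let retry+=1
        if [[ ${retry} -ge 120 ]]; then
            if [[ ${mandatory} == 'true' ]]; then
                exit 1
            fi
            return 0
        fi
    done
    retry=0
    # phase 2: poll .status.succeeded once a second, up to 600 tries
    until [[ $(kubectl_bin -n "${ns}" get "job/${job}" -o 'jsonpath={.status.succeeded}') == '1' ]]; do
        if [[ $(kubectl_bin -n "${ns}" get "job/${job}" -o 'jsonpath={.metadata.name}' | grep -io 'not found') == 'not found' ]]; then
            if [[ ${mandatory} == 'true' ]]; then
                exit 1
            fi
            return 0
        fi
        sleep 1
        echo -n .
        let retry+=1
        if [ ${retry} -ge 600 ]; then
            echo "Job ${job} did not complete, giving up"
            exit 1
        fi
    done
    echo
}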
config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=affinity-13957 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name affinity-13957 + local cluster=some-name + local namespace=affinity-13957 + yq w - metadata.name some-name-users-secret + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + kubectl_bin -n affinity-13957 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TSVdiJVpTY ++ mktemp + local LAST_ERR=/tmp/tmp.pu9sGnYYt9 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TSVdiJVpTY secret/some-name-users-secret created + cat /tmp/tmp.pu9sGnYYt9 + rm /tmp/tmp.TSVdiJVpTY /tmp/tmp.pu9sGnYYt9 + return 0 + yq w - metadata.name some-name-pmm-secret + kubectl_bin -n affinity-13957 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.BvK1JmkaT5 ++ mktemp + local LAST_ERR=/tmp/tmp.aQqTnsfRxA + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BvK1JmkaT5 secret/some-name-pmm-secret created + cat /tmp/tmp.aQqTnsfRxA + rm /tmp/tmp.BvK1JmkaT5 /tmp/tmp.aQqTnsfRxA + return 0 + kubectl_bin -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ormnSpEvAH ++ mktemp + local LAST_ERR=/tmp/tmp.HpdXHZrpxS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ormnSpEvAH deployment.apps/pg-client created + cat /tmp/tmp.HpdXHZrpxS + rm /tmp/tmp.ormnSpEvAH /tmp/tmp.HpdXHZrpxS + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.deployment-name some-name + yq w - metadata.labels.name some-name + yq w - spec.database some-name + yq w - spec.keepBackups false + yq w - spec.name some-name + yq w - spec.clustername some-name + yq w - 
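create_user_secrets, traced just above, builds the two Secrets from the multi-document deploy/users-secret.yaml with yq v3: document 0 becomes <cluster>-users-secret (with a predictable test password), document 1 becomes <cluster>-pmm-secret. Approximately, with deploy_dir standing in for the deploy/ directory used in this run:

create_user_secrets() {
    local cluster=$1
    local namespace=$2
    # document 0 of users-secret.yaml -> <cluster>-users-secret
    yq r -d0 "${deploy_dir}/users-secret.yaml" \
        | yq w - metadata.name "${cluster}-users-secret" \
        | yq w - "stringData.${cluster}" "${cluster}_pass" \
        | kubectl_bin -n "${namespace}" apply -f -
    # document 1 -> <cluster>-pmm-secret
    yq r -d1 "${deploy_dir}/users-secret.yaml" \
        | yq w - metadata.name "${cluster}-pmm-secret" \
        | kubectl_bin -n "${namespace}" apply -f -
}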
spec.keepData false + yq w - metadata.labels.pg-cluster some-name + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.user some-name + yq w - spec.secretsName some-name-users-secret + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - metadata.name some-name + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - metadata.labels.crunchy-pgha-scope some-name + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.4wLcAiuOJZ ++ mktemp + local LAST_ERR=/tmp/tmp.B39QDkc9Rz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4wLcAiuOJZ perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.B39QDkc9Rz + rm /tmp/tmp.4wLcAiuOJZ /tmp/tmp.B39QDkc9Rz + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name-backrest-shared-repo... + wait_deployment some-name + local name=some-name + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . 
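apply_cluster templates deploy/cr.yaml the same way: a chain of yq writes sets the cluster name, labels, user, secret name and image tags before the result is applied, after which the test blocks on the supporting Deployments and Jobs. A condensed sketch showing only a subset of the writes visible above (name, path, tmp_dir and namespace are placeholders):

yq w "${path}" metadata.annotations.current-primary "${name}" \
    | yq w - metadata.name "${name}" \
    | yq w - metadata.labels.pg-cluster "${name}" \
    | yq w - metadata.labels.crunchy-pgha-scope "${name}" \
    | yq w - spec.clustername "${name}" \
    | yq w - spec.database "${name}" \
    | yq w - spec.user "${name}" \
    | yq w - spec.secretsName "${name}-users-secret" \
    | yq w - spec.keepData false \
    | yq w - spec.keepBackups false \
    | yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha \
    | yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest \
    > "${tmp_dir}/cr.yaml"
kubectl_bin apply -f "${tmp_dir}/cr.yaml" -n "${namespace}"

# then block until the supporting pieces are up
wait_deployment "${name}-backrest-shared-repo"
wait_deployment "${name}"
wait_job_completion "${name}-stanza-create" false   # non-mandatory job
wait_job_completion "backrest-backup-${name}"       # initial full backup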
.+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-13957 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jKq6JfgjSc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pv8V9tFAEY ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jKq6JfgjSc ++ cat /tmp/tmp.Pv8V9tFAEY ++ rm /tmp/tmp.jKq6JfgjSc /tmp/tmp.Pv8V9tFAEY ++ return 0 + status_query='kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vj3rdhrdXr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pCPRHOyx95 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox 
+++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Vj3rdhrdXr +++ cat /tmp/tmp.pCPRHOyx95 +++ rm /tmp/tmp.Vj3rdhrdXr /tmp/tmp.pCPRHOyx95 +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.htOIFjvLt0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MPD11A9SEz +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.htOIFjvLt0 +++ cat /tmp/tmp.MPD11A9SEz +++ rm /tmp/tmp.htOIFjvLt0 /tmp/tmp.MPD11A9SEz +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wdJz1j93m2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.shFAwMfipk +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wdJz1j93m2 +++ cat /tmp/tmp.shFAwMfipk +++ rm /tmp/tmp.wdJz1j93m2 /tmp/tmp.shFAwMfipk +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.U8VILt7O8K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qcbxGCFLUy +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.U8VILt7O8K +++ cat /tmp/tmp.qcbxGCFLUy +++ rm /tmp/tmp.U8VILt7O8K /tmp/tmp.qcbxGCFLUy +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F5sUfTRwIq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.u5gUJywCwd +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat 
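wait_cluster_consistency is a thin wrapper around wait_cluster_status, which polls .status.PGCluster.state of the perconapgcluster object every 10 seconds until it reads 'pgcluster Initialized' (the first poll above still returned 'pgcluster Processed'). Roughly, as reconstructed from this trace:

wait_cluster_consistency() {
    wait_cluster_status "$1" 'pgcluster Initialized'
}

wait_cluster_status() {
    local cluster_name=$1
    local target_status=$2
    local retry=0
    # the jsonpath is chosen per pgo-version; this is the branch taken in this run
    local status_query="kubectl_bin -n ${namespace} get perconapgcluster/${cluster_name} -o jsonpath='{.status.PGCluster.state}'"
    until [[ $(eval "${status_query}") == "${target_status}" ]]; do
        let retry+=1
        if [ ${retry} -ge 24 ]; then
            echo "Cluster never reached status '${target_status}'"
            exit 1
        fi
        echo 'waiting for cluster readyness'
        sleep 10
    done
}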
/tmp/tmp.F5sUfTRwIq +++ cat /tmp/tmp.u5gUJywCwd +++ rm /tmp/tmp.F5sUfTRwIq /tmp/tmp.u5gUJywCwd +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.F14zxDrBB1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.XRe6wD3Acd ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.F14zxDrBB1 ++++ cat /tmp/tmp.XRe6wD3Acd ++++ rm /tmp/tmp.F14zxDrBB1 /tmp/tmp.XRe6wD3Acd ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Rs8ZIFKwVp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YTkdamlCjD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Rs8ZIFKwVp +++ cat /tmp/tmp.YTkdamlCjD +++ rm /tmp/tmp.Rs8ZIFKwVp /tmp/tmp.YTkdamlCjD +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.affinity-13957 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.01aDPBpz7z +++ mktemp ++ local LAST_ERR=/tmp/tmp.etZGodFeqY ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.01aDPBpz7z ++ cat /tmp/tmp.etZGodFeqY ++ rm /tmp/tmp.01aDPBpz7z /tmp/tmp.etZGodFeqY ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Xi7son00P2 ++ mktemp + local LAST_ERR=/tmp/tmp.t7TR3sQhsi + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Xi7son00P2 + cat /tmp/tmp.t7TR3sQhsi + rm /tmp/tmp.Xi7son00P2 /tmp/tmp.t7TR3sQhsi + return 0 ++ get_psql_user_pass postgres 
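The "write data" steps use run_psql: the postgres password is pulled from the cluster's users Secret (base64-decoded .data.postgres), and the SQL is piped into psql inside the pg-client pod. A sketch under those assumptions (postgres_password holds the decoded secret value, which in this test run is literally "postgres_password"):

run_psql() {
    local command=$1
    local uri=$2
    local driver=${3:-postgres}
    local suffix='.svc.cluster.local'
    local client_container
    client_container=$(kubectl_bin -n "${namespace}" get pods \
        --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')
    kubectl_bin exec "${client_container}" -n "${namespace}" -- bash -c \
        "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q ${driver}://${uri}${suffix}"
}

# password for the 'postgres' user, as extracted by get_psql_user_pass above:
postgres_password=$(kubectl_bin -n affinity-13957 get secret/some-name-users-secret \
    -o 'jsonpath={.data.postgres}' | base64 -d)
run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' \
    "postgres:${postgres_password}@some-name.affinity-13957"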
some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9OOtd4lGh6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Fybmz7Ockv +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9OOtd4lGh6 +++ cat /tmp/tmp.Fybmz7Ockv +++ rm /tmp/tmp.9OOtd4lGh6 /tmp/tmp.Fybmz7Ockv +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XgYWuZkr36 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CSgkTHq9lu +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.XgYWuZkr36 +++ cat /tmp/tmp.CSgkTHq9lu +++ rm /tmp/tmp.XgYWuZkr36 /tmp/tmp.CSgkTHq9lu +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uB6UpoWhsd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ggvc5YOrbx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.uB6UpoWhsd +++ cat /tmp/tmp.ggvc5YOrbx +++ rm /tmp/tmp.uB6UpoWhsd /tmp/tmp.ggvc5YOrbx +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CP8FSs7Ecf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uwXRLPOJQl ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.CP8FSs7Ecf ++++ cat /tmp/tmp.uwXRLPOJQl ++++ rm /tmp/tmp.CP8FSs7Ecf /tmp/tmp.uwXRLPOJQl ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8k0eMlH0xe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nW3i10t4af +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8k0eMlH0xe +++ cat /tmp/tmp.nW3i10t4af +++ rm /tmp/tmp.8k0eMlH0xe /tmp/tmp.nW3i10t4af +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' 
postgres:postgres_password@some-name.affinity-13957 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7l1u6cW37j +++ mktemp ++ local LAST_ERR=/tmp/tmp.omuBQWl3vg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.7l1u6cW37j ++ cat /tmp/tmp.omuBQWl3vg ++ rm /tmp/tmp.7l1u6cW37j /tmp/tmp.omuBQWl3vg ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.2eyWNBF3VJ ++ mktemp + local LAST_ERR=/tmp/tmp.VFEUxIhODz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.2eyWNBF3VJ + cat /tmp/tmp.VFEUxIhODz + rm /tmp/tmp.2eyWNBF3VJ /tmp/tmp.VFEUxIhODz + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iKo3VnTVGs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.t1aWALDBn2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iKo3VnTVGs +++ cat /tmp/tmp.t1aWALDBn2 +++ rm /tmp/tmp.iKo3VnTVGs /tmp/tmp.t1aWALDBn2 +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TGilmwlcbA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QneYq8367r +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.TGilmwlcbA +++ cat /tmp/tmp.QneYq8367r +++ rm /tmp/tmp.TGilmwlcbA /tmp/tmp.QneYq8367r +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O45pZTau2H ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vOQOeEJD0h +++ local 
exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.O45pZTau2H +++ cat /tmp/tmp.vOQOeEJD0h +++ rm /tmp/tmp.O45pZTau2H /tmp/tmp.vOQOeEJD0h +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ST1FUrlgFJ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ZGOGo7MoQ5 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.ST1FUrlgFJ ++++ cat /tmp/tmp.ZGOGo7MoQ5 ++++ rm /tmp/tmp.ST1FUrlgFJ /tmp/tmp.ZGOGo7MoQ5 ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.X2UiOc1OHI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eekHNoEBwH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.X2UiOc1OHI +++ cat /tmp/tmp.eekHNoEBwH +++ rm /tmp/tmp.X2UiOc1OHI /tmp/tmp.eekHNoEBwH +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.affinity-13957 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zZVOyEmh0f +++ mktemp ++ local LAST_ERR=/tmp/tmp.okkohzgU0g ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.zZVOyEmh0f ++ cat /tmp/tmp.okkohzgU0g ++ rm /tmp/tmp.zZVOyEmh0f /tmp/tmp.okkohzgU0g ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.fCn7MDaIQ5 ++ mktemp + local LAST_ERR=/tmp/tmp.qgprgF4pxb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT 
USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.fCn7MDaIQ5 + cat /tmp/tmp.qgprgF4pxb + rm /tmp/tmp.fCn7MDaIQ5 /tmp/tmp.qgprgF4pxb + return 0 + sleep 10 + compare_kubectl deployment/some-name + local resource=deployment/some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - metadata.selfLink + yq d - '**.uid' + yq d - 'metadata.annotations."cloud.google.com/neg"' + local LAST_OUT=/tmp/tmp.SyJEwmjlkb + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - 'metadata.annotations."kubernetes.io/psp"' ++ mktemp + yq d - '**.clusterIP' + yq d - '**.dataSource' + yq d - '**.clusterIPs' + local LAST_ERR=/tmp/tmp.htlqFCpBAP + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.procMount' + yq d - '**.imagePullSecrets' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - spec.volumeMode + yq d - '**.enableServiceLinks' + yq d - '**."percona.com/*"' ++ seq 0 2 + yq d - spec.nodeName + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.nodePort' + yq d - '**.healthCheckNodePort' + yq d - '**.ipFamilyPolicy' + yq d - '**.internalTrafficPolicy' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.ipFamilies' + yq d - '**.preemptionPolicy' + yq d - status + yq d - metadata.deletionTimestamp + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==suffix)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(name==NAMESPACE)' + yq d - '**.controller-uid' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==PODNAME)' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SyJEwmjlkb + cat /tmp/tmp.htlqFCpBAP + rm /tmp/tmp.SyJEwmjlkb /tmp/tmp.htlqFCpBAP + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name.yml + compare_kubectl deployment/some-name-backrest-shared-repo + local resource=deployment/some-name-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-backrest-shared-repo.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml 
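compare_kubectl is the actual affinity assertion: it dumps the live object as YAML, strips every field that legitimately differs between runs with a long chain of yq deletes, and diffs the result against the checked-in expectation in e2e-tests/affinity/compare/. A reduced sketch (the slash-to-underscore file naming via tr is inferred from the paths above; only a few of the yq deletes are shown):

compare_kubectl() {
    local resource=$1
    local postfix=${2:-}
    # deployment/some-name -> deployment_some-name.yml
    local expected_result=${test_dir}/compare/$(echo "${resource}" | tr '/' '_')${postfix}.yml
    local new_result=${tmp_dir}/$(echo "${resource}" | tr '/' '_')${postfix}.yml
    # dump the live object, dropping every field that varies between runs
    kubectl_bin get -o yaml "${resource}" \
        | yq d - metadata.managedFields \
        | yq d - metadata.resourceVersion \
        | yq d - metadata.selfLink \
        | yq d - '**.uid' \
        | yq d - '**.creationTimestamp' \
        | yq d - '**.namespace' \
        | yq d - '**.image' \
        | yq d - status \
        > "${new_result}"
    # Jobs get extra normalization (annotations, generation, completionMode, suspend) before the diff
    diff -u "${expected_result}" "${new_result}"
}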
deployment/some-name-backrest-shared-repo + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - metadata.selfLink + local LAST_OUT=/tmp/tmp.5Kpq4BqjhX + yq d - '**.uid' + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."percona.com/*"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.imagePullSecrets' + yq d - '**.(name==suffix)' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.volumeName' + yq d - '**.ipFamilies' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - status + yq d - spec.volumeMode + yq d - '**.dataSource' + yq d - spec.nodeName + yq d - '**.nodePort' ++ mktemp + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + local LAST_ERR=/tmp/tmp.N10Y9ZLZKr + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.controller-uid' + yq d - '**.finalizers' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.enableServiceLinks' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilyPolicy' + yq d - '**.internalTrafficPolicy' + yq d - '**.(name==PODNAME)' + yq d - '**.clusterIPs' + yq d - '**.clusterIP' + yq d - '**.image' ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name-backrest-shared-repo + yq d - '**.creationTimestamp' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.5Kpq4BqjhX + cat /tmp/tmp.N10Y9ZLZKr + rm /tmp/tmp.5Kpq4BqjhX /tmp/tmp.N10Y9ZLZKr + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-backrest-shared-repo.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name + local resource=job/backrest-backup-some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name.yml + local new_result=/tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.uid' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.clusterIP' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - metadata.resourceVersion + yq d - '**.clusterIPs' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - metadata.deletionTimestamp + yq d - 
'**."kubernetes.io/pvc-protection"' + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.storageClassName' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.finalizers' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - status + yq d - '**.(name==suffix)' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - '**.healthCheckNodePort' + yq d - '**."percona.com/*"' + yq d - '**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - '**.ipFamilies' + yq d - '**.ipFamilyPolicy' + yq d - '**.nodePort' + yq d - '**.internalTrafficPolicy' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.(name==PODNAME)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==COMMAND_OPTS)' + yq d - metadata.selfLink + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + kubectl_bin get -o yaml job/backrest-backup-some-name ++ mktemp + local LAST_OUT=/tmp/tmp.wbj5IToj4x ++ mktemp + local LAST_ERR=/tmp/tmp.03GGbRRsbn + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wbj5IToj4x + cat /tmp/tmp.03GGbRRsbn + rm /tmp/tmp.wbj5IToj4x /tmp/tmp.03GGbRRsbn + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml kind ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml metadata.annotations + [[ x{} == \x ]] ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml metadata.annotations + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml metadata.generation + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml spec.completionMode + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name.yml /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name.yml + create_backup some-name preferred + local cluster=some-name + local bckp_prefix=preferred + local bckp_type=full + yq w - metadata.name preferred-some-name + yq w - spec.name preferred-some-name + yq w - spec.namespace affinity-13957 + yq w - spec.parameters.job-name preferred-some-name + kubectl_bin apply -f - + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.pg-cluster some-name + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name ++ mktemp ++ kubectl_bin get pods --selector=name=some-name-backrest-shared-repo,pg-cluster=some-name -o 'jsonpath={.items[].metadata.name}' +++ mktemp + local LAST_OUT=/tmp/tmp.eNlJo92rWS ++ mktemp ++ local LAST_OUT=/tmp/tmp.L5p2VUYcL9 +++ mktemp + local LAST_ERR=/tmp/tmp.sAWA0UyMF0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.pnISA4bv53 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit + for i in '$(seq 0 2)' +++ seq 0 2 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f - ++ for i in '$(seq 0 2)' ++ 
KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=some-name-backrest-shared-repo,pg-cluster=some-name -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.L5p2VUYcL9 ++ cat /tmp/tmp.pnISA4bv53 ++ rm /tmp/tmp.L5p2VUYcL9 /tmp/tmp.pnISA4bv53 ++ return 0 + yq w - spec.parameters.podname some-name-backrest-shared-repo-6f8f947dbc-fbx5j + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eNlJo92rWS pgtask.pg.percona.com/preferred-some-name created + cat /tmp/tmp.sAWA0UyMF0 + rm /tmp/tmp.eNlJo92rWS /tmp/tmp.sAWA0UyMF0 + return 0 + sleep 10 + wait_job_completion preferred-some-name + local job=preferred-some-name + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' preferred-some-name+ retry=0 ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
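# Note: the compare_kubectl steps traced above all follow one pattern: fetch the live object as YAML,
# strip volatile server-managed fields with yq v3 deletes, then diff against a golden file under
# e2e-tests/affinity/compare/. A condensed sketch of that pattern (the field list is abbreviated and the
# scratch path is illustrative; the full delete list is visible in the trace above):
compare_kubectl_sketch() {
    local resource=$1                              # e.g. deployment/some-name
    local expected=$2                              # golden YAML from e2e-tests/affinity/compare/
    local actual=/tmp/actual.yml                   # hypothetical scratch file
    kubectl get -o yaml "${resource}" \
        | yq d - metadata.managedFields \
        | yq d - metadata.resourceVersion \
        | yq d - metadata.selfLink \
        | yq d - '**.uid' \
        | yq d - '**.creationTimestamp' \
        | yq d - '**.image' \
        | yq d - status \
        > "${actual}"
    diff -u "${expected}" "${actual}"              # any diff fails the test because errexit is on
}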
.+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/preferred-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete perconapgcluster some-name ++ mktemp + local LAST_OUT=/tmp/tmp.Wqaod2FzC4 ++ mktemp + local LAST_ERR=/tmp/tmp.IhA1hSS9Xd + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete perconapgcluster some-name + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Wqaod2FzC4 perconapgcluster.pg.percona.com "some-name" deleted + cat /tmp/tmp.IhA1hSS9Xd + rm /tmp/tmp.Wqaod2FzC4 /tmp/tmp.IhA1hSS9Xd + return 0 + sleep 30 + cluster=some-name-required + spinup_pgcluster some-name-required /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml '' '' required + local cluster=some-name-required + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=required + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=affinity-13957 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-required affinity-13957 + local cluster=some-name-required + local namespace=affinity-13957 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name some-name-required-users-secret + kubectl_bin -n 
affinity-13957 apply -f - + yq w - stringData.some-name some-name_pass ++ mktemp + local LAST_OUT=/tmp/tmp.c3bsZCVWCO ++ mktemp + local LAST_ERR=/tmp/tmp.sldMA4yTXe + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.c3bsZCVWCO secret/some-name-required-users-secret created + cat /tmp/tmp.sldMA4yTXe + rm /tmp/tmp.c3bsZCVWCO /tmp/tmp.sldMA4yTXe + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name some-name-required-pmm-secret + kubectl_bin -n affinity-13957 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GMksNiLUFL ++ mktemp + local LAST_ERR=/tmp/tmp.A5lnF6aaqP + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.GMksNiLUFL secret/some-name-required-pmm-secret created + cat /tmp/tmp.A5lnF6aaqP + rm /tmp/tmp.GMksNiLUFL /tmp/tmp.A5lnF6aaqP + return 0 + kubectl_bin -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MVJ5mwc7vt ++ mktemp + local LAST_ERR=/tmp/tmp.3wXuzEeMwZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.MVJ5mwc7vt deployment.apps/pg-client unchanged + cat /tmp/tmp.3wXuzEeMwZ + rm /tmp/tmp.MVJ5mwc7vt /tmp/tmp.3wXuzEeMwZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml some-name-required false false false false required false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local name=some-name-required + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=required + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml metadata.annotations.current-primary some-name-required + yq w - metadata.labels.crunchy-pgha-scope some-name-required + yq w - metadata.labels.name some-name-required + yq w - spec.database some-name-required + yq w - metadata.name some-name-required + yq w - spec.clustername some-name-required + yq w - metadata.labels.deployment-name some-name-required + yq w - metadata.labels.pg-cluster some-name-required + yq w - spec.name some-name-required + yq w - spec.keepData false + yq w - spec.keepBackups false + yq w - spec.secretsName some-name-required-users-secret + yq w - spec.user some-name + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pgPrimary.image 
perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + case ${podAntiAffinity} in + yq w /tmp/tmp.IIbhFo6FJI/cr.yaml spec.pgPrimary.affinity.antiAffinityType required + yq w - spec.backup.affinity.antiAffinityType required + yq w - spec.pgBouncer.affinity.antiAffinityType required + mv /tmp/tmp.IIbhFo6FJI/cr.podAffinity.yaml /tmp/tmp.IIbhFo6FJI/cr.yaml + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.wc4aZmSeFr ++ mktemp + local LAST_ERR=/tmp/tmp.mFHRaneTNC + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wc4aZmSeFr perconapgcluster.pg.percona.com/some-name-required created + cat /tmp/tmp.mFHRaneTNC + rm /tmp/tmp.wc4aZmSeFr /tmp/tmp.mFHRaneTNC + return 0 + wait_deployment some-name-required-backrest-shared-repo + local name=some-name-required-backrest-shared-repo + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name-required-backrest-shared-repo... + wait_deployment some-name-required + local name=some-name-required + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name-required.......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-required-stanza-create false + local job=some-name-required-stanza-create + local mandatory=false + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' some-name-required-stanza-create+ retry=0 ++ kubectl_bin -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-required-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-some-name-required + local job=backrest-backup-some-name-required + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' backrest-backup-some-name-required+ retry=0 ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
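# Note: the apply_cluster step above templates deploy/cr.yaml with yq v3 writes before applying it.
# For the "required" anti-affinity case the relevant knobs are set roughly as below (a simplified
# sketch piping the writes together; the real helper also rewrites names, labels, images and the
# users secret as shown in the trace, and works through temporary files):
yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml metadata.name some-name-required \
    | yq w - spec.clustername some-name-required \
    | yq w - spec.pgPrimary.affinity.antiAffinityType required \
    | yq w - spec.backup.affinity.antiAffinityType required \
    | yq w - spec.pgBouncer.affinity.antiAffinityType required \
    | kubectl apply -n affinity-13957 -f -         # creates perconapgcluster/some-name-required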
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-required -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-required + cluster_name=some-name-required + wait_cluster_status some-name-required 'pgcluster Initialized' + local cluster_name=some-name-required + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-13957 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0BGWHROA8h +++ mktemp ++ local LAST_ERR=/tmp/tmp.xjPkgd1XJU ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.0BGWHROA8h ++ cat /tmp/tmp.xjPkgd1XJU ++ rm /tmp/tmp.0BGWHROA8h /tmp/tmp.xjPkgd1XJU ++ return 0 + status_query='kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t8MDAwEFTV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4RU7s8fNv5 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.t8MDAwEFTV +++ cat /tmp/tmp.4RU7s8fNv5 +++ rm /tmp/tmp.t8MDAwEFTV /tmp/tmp.4RU7s8fNv5 +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ljBUV9S1DQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XkyUD54sjP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ljBUV9S1DQ +++ cat /tmp/tmp.XkyUD54sjP +++ rm /tmp/tmp.ljBUV9S1DQ /tmp/tmp.XkyUD54sjP +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get 
perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JWrPcZmywR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xgmx76T7Y4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.JWrPcZmywR +++ cat /tmp/tmp.xgmx76T7Y4 +++ rm /tmp/tmp.JWrPcZmywR /tmp/tmp.xgmx76T7Y4 +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.K59D6fG6Vl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kr6v57D8nu +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.K59D6fG6Vl +++ cat /tmp/tmp.kr6v57D8nu +++ rm /tmp/tmp.K59D6fG6Vl /tmp/tmp.kr6v57D8nu +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ccZSnxm4nG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GHYPdFkxW4 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.ccZSnxm4nG ++++ cat /tmp/tmp.GHYPdFkxW4 ++++ rm /tmp/tmp.ccZSnxm4nG /tmp/tmp.GHYPdFkxW4 ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7fD9TcG86U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YG7fY9VYii +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.7fD9TcG86U +++ cat /tmp/tmp.YG7fY9VYii +++ rm /tmp/tmp.7fD9TcG86U /tmp/tmp.YG7fY9VYii +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-required.affinity-13957 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-required.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Us124hR0QF +++ mktemp ++ local LAST_ERR=/tmp/tmp.tvZuR8v66G ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] 
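# Note: the run_psql call being assembled here first resolves the superuser password from the
# cluster's users secret, then runs psql inside the pg-client pod. A condensed sketch of both steps
# (object names are taken from the trace; the real command carries the full \c myapp / CREATE TABLE payload):
secret=$(kubectl -n affinity-13957 get perconapgcluster/some-name-required -o jsonpath='{.spec.secretsName}')
pass=$(kubectl -n affinity-13957 get "secret/${secret}" -o jsonpath='{.data.postgres}' | base64 -d)
client=$(kubectl -n affinity-13957 get pods --selector=name=pg-client -o jsonpath='{.items[].metadata.name}')
kubectl exec "${client}" -n affinity-13957 -- bash -c \
    "printf 'CREATE DATABASE myapp;\n' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:${pass}@some-name-required.affinity-13957.svc.cluster.local"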
++ break ++ set -o errexit ++ cat /tmp/tmp.Us124hR0QF ++ cat /tmp/tmp.tvZuR8v66G ++ rm /tmp/tmp.Us124hR0QF /tmp/tmp.tvZuR8v66G ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Qx6Esz0WQG ++ mktemp + local LAST_ERR=/tmp/tmp.5DShB5IsTt + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Qx6Esz0WQG + cat /tmp/tmp.5DShB5IsTt + rm /tmp/tmp.Qx6Esz0WQG /tmp/tmp.5DShB5IsTt + return 0 ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ENIVV8P1Ge ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ADslaIa273 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ENIVV8P1Ge +++ cat /tmp/tmp.ADslaIa273 +++ rm /tmp/tmp.ENIVV8P1Ge /tmp/tmp.ADslaIa273 +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EZfeKW6hDi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FxmVHpAALm +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.EZfeKW6hDi +++ cat /tmp/tmp.FxmVHpAALm +++ rm /tmp/tmp.EZfeKW6hDi /tmp/tmp.FxmVHpAALm +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.a6ucKITub4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ggLRQSi83K +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.a6ucKITub4 +++ cat /tmp/tmp.ggLRQSi83K +++ rm /tmp/tmp.a6ucKITub4 /tmp/tmp.ggLRQSi83K +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 
'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.M1sPevIza1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4g5pNwakVt ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.M1sPevIza1 ++++ cat /tmp/tmp.4g5pNwakVt ++++ rm /tmp/tmp.M1sPevIza1 /tmp/tmp.4g5pNwakVt ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OYNuBLr6LI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.scy15JX0gk +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OYNuBLr6LI +++ cat /tmp/tmp.scy15JX0gk +++ rm /tmp/tmp.OYNuBLr6LI /tmp/tmp.scy15JX0gk +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-required.affinity-13957 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name-required.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.usekx1vQyA +++ mktemp ++ local LAST_ERR=/tmp/tmp.tSD8YBooHX ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.usekx1vQyA ++ cat /tmp/tmp.tSD8YBooHX ++ rm /tmp/tmp.usekx1vQyA /tmp/tmp.tSD8YBooHX ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.mCUZVB8Si5 ++ mktemp + local LAST_ERR=/tmp/tmp.iIrMO2wDfI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.mCUZVB8Si5 + cat /tmp/tmp.iIrMO2wDfI + rm /tmp/tmp.mCUZVB8Si5 /tmp/tmp.iIrMO2wDfI + return 0 ++ get_psql_user_pass postgres some-name-required ++ local user=postgres ++ local cluster=some-name-required ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jEwhObK27G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SB0n6fDWjj +++ 
local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jEwhObK27G +++ cat /tmp/tmp.SB0n6fDWjj +++ rm /tmp/tmp.jEwhObK27G /tmp/tmp.SB0n6fDWjj +++ return 0 ++ [[ xsome-name-required == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qtxQfJaoYv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.v37gJ0hxoX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-required -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qtxQfJaoYv +++ cat /tmp/tmp.v37gJ0hxoX +++ rm /tmp/tmp.qtxQfJaoYv /tmp/tmp.v37gJ0hxoX +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YYvSvMhECi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tjym3j92TR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.YYvSvMhECi +++ cat /tmp/tmp.tjym3j92TR +++ rm /tmp/tmp.YYvSvMhECi /tmp/tmp.tjym3j92TR +++ return 0 ++ [[ xsome-name-required-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dXIR2o3dOF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LEFgPs1VOZ ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-required -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.dXIR2o3dOF ++++ cat /tmp/tmp.LEFgPs1VOZ ++++ rm /tmp/tmp.dXIR2o3dOF /tmp/tmp.LEFgPs1VOZ ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1GtFqmZMEl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ScR9cWy6kQ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-required-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1GtFqmZMEl +++ cat /tmp/tmp.ScR9cWy6kQ +++ rm /tmp/tmp.1GtFqmZMEl /tmp/tmp.ScR9cWy6kQ +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-required.affinity-13957 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + 
local uri=postgres:postgres_password@some-name-required.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z2uESnwesJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.wPeVyq7hJG ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.z2uESnwesJ ++ cat /tmp/tmp.wPeVyq7hJG ++ rm /tmp/tmp.z2uESnwesJ /tmp/tmp.wPeVyq7hJG ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.DyZBJHOpme ++ mktemp + local LAST_ERR=/tmp/tmp.CLzE9UpLNQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-required.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.DyZBJHOpme + cat /tmp/tmp.CLzE9UpLNQ + rm /tmp/tmp.DyZBJHOpme /tmp/tmp.CLzE9UpLNQ + return 0 + sleep 10 + compare_kubectl deployment/some-name-required + local resource=deployment/some-name-required + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-required.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name-required.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - '**.uid' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - metadata.resourceVersion + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.clusterIP' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.dataSource' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**.volumeName' + yq d - '**.procMount' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.internalTrafficPolicy' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.imagePullSecrets' + yq d - '**.(name==suffix)' + yq d - '**.image' + yq d - '**.creationTimestamp' + yq d - '**.preemptionPolicy' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**.ipFamilies' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.clusterIPs' + yq d - '**.storageClassName' + yq d - '**.healthCheckNodePort' + yq d - '**.ipFamilyPolicy' + yq d - 
'**.allocateLoadBalancerNodePorts' + yq d - '**.enableServiceLinks' + yq d - '**.controller-uid' + yq d - status + yq d - '**.nodePort' + yq d - '**."percona.com/*"' + yq d - '**.finalizers' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==COMMAND_OPTS)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.(name==PODNAME)' + kubectl_bin get -o yaml deployment/some-name-required ++ mktemp + local LAST_OUT=/tmp/tmp.ZjiY2Hbxnj ++ mktemp + local LAST_ERR=/tmp/tmp.QyTXXHaoC1 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name-required + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ZjiY2Hbxnj + cat /tmp/tmp.QyTXXHaoC1 + rm /tmp/tmp.ZjiY2Hbxnj /tmp/tmp.QyTXXHaoC1 + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name-required.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-required.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name-required.yml + compare_kubectl deployment/some-name-required-backrest-shared-repo + local resource=deployment/some-name-required-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-required-backrest-shared-repo.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name-required-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-required-backrest-shared-repo + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.uid' + yq d - 'metadata.annotations."kubernetes.io/psp"' + local LAST_OUT=/tmp/tmp.m2F2vRUpAq + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - spec.volumeMode + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.clusterIPs' + yq d - '**.creationTimestamp' ++ mktemp + yq d - '**.dataSource' + local LAST_ERR=/tmp/tmp.lCwr7KamFe + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.procMount' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.imagePullSecrets' + yq d - '**.volumeName' ++ seq 0 2 + yq d - '**.storageClassName' + yq d - status + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name-required-backrest-shared-repo + yq d - '**.(name==suffix)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.healthCheckNodePort' + yq d - '**.nodePort' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.finalizers' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.enableServiceLinks' + yq d - '**.controller-uid' + yq d - '**.internalTrafficPolicy' + yq d - '**.(name==PODNAME)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - 
'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilies' + yq d - '**.ipFamilyPolicy' + yq d - metadata.selfLink + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.m2F2vRUpAq + cat /tmp/tmp.lCwr7KamFe + rm /tmp/tmp.m2F2vRUpAq /tmp/tmp.lCwr7KamFe + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name-required-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-required-backrest-shared-repo.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name-required-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name-required + local resource=job/backrest-backup-some-name-required + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name-required.yml + local new_result=/tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml job/backrest-backup-some-name-required + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - metadata.selfLink ++ mktemp + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + local LAST_OUT=/tmp/tmp.hks8unMbCq + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**."batch.kubernetes.io/controller-uid"' ++ mktemp + local LAST_ERR=/tmp/tmp.SbIRSujm9m + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.dataSource' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.creationTimestamp' + yq d - '**.clusterIP' + yq d - '**.image' + yq d - '**.clusterIPs' + yq d - '**.finalizers' + yq d - '**.procMount' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.storageClassName' + yq d - '**."percona.com/*"' + yq d - '**.volumeName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - spec.volumeMode + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.nodeName + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."volume.kubernetes.io/selected-node"' ++ seq 0 2 + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - '**.nodePort' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name-required + yq d - status + yq d - '**.(name==suffix)' + yq d - '**.enableServiceLinks' + yq d - '**.(name==NAMESPACE)' + yq d - '**.ipFamilies' + yq d - '**.(name==COMMAND_OPTS)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.ipFamilyPolicy' + yq d - '**.(name==PODNAME)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.internalTrafficPolicy' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.preemptionPolicy' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hks8unMbCq + cat /tmp/tmp.SbIRSujm9m + rm /tmp/tmp.hks8unMbCq /tmp/tmp.SbIRSujm9m + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml kind ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml metadata.annotations + [[ 
x{} == \x ]] ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml metadata.annotations + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml metadata.generation + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml spec.completionMode + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name-required.yml /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-required.yml + create_backup some-name-required required + local cluster=some-name-required + local bckp_prefix=required + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name-required + yq w - spec.name required-some-name-required + yq w - spec.parameters.job-name required-some-name-required + yq w --style=single -- - spec.parameters.backrest-opts --type=full + yq w - spec.parameters.pg-cluster some-name-required + yq w - spec.namespace affinity-13957 + kubectl_bin apply -f - + yq w - metadata.name required-some-name-required ++ kubectl_bin get pods --selector=name=some-name-required-backrest-shared-repo,pg-cluster=some-name-required -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ mktemp ++ local LAST_OUT=/tmp/tmp.iei7RVS07Y + local LAST_OUT=/tmp/tmp.LAKnvqedYU +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.me2yR1Stqr + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.mtzdWEbDxK ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f - +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=some-name-required-backrest-shared-repo,pg-cluster=some-name-required -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.iei7RVS07Y ++ cat /tmp/tmp.mtzdWEbDxK ++ rm /tmp/tmp.iei7RVS07Y /tmp/tmp.mtzdWEbDxK ++ return 0 + yq w - spec.parameters.podname some-name-required-backrest-shared-repo-8558c9b6fc-ss9q6 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LAKnvqedYU pgtask.pg.percona.com/required-some-name-required created + cat /tmp/tmp.me2yR1Stqr + rm /tmp/tmp.LAKnvqedYU /tmp/tmp.me2yR1Stqr + return 0 + sleep 10 + wait_job_completion required-some-name-required + local job=required-some-name-required + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' required-some-name-required+ retry=0 ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
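# Note: the create_backup step above builds a pgtask manifest from e2e-tests/conf/backup.yml with
# yq v3 writes (names, namespace, backrest options, repo pod) and applies it; roughly:
repo_pod=$(kubectl get pods \
    --selector=name=some-name-required-backrest-shared-repo,pg-cluster=some-name-required \
    -o jsonpath='{.items[].metadata.name}')
yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name-required \
    | yq w - metadata.name required-some-name-required \
    | yq w - spec.name required-some-name-required \
    | yq w - spec.namespace affinity-13957 \
    | yq w - spec.parameters.job-name required-some-name-required \
    | yq w - spec.parameters.pg-cluster some-name-required \
    | yq w - spec.parameters.podname "${repo_pod}" \
    | yq w --style=single -- - spec.parameters.backrest-opts --type=full \
    | kubectl apply -f -                           # creates pgtask.pg.percona.com/required-some-name-required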
.+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/required-some-name-required -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete perconapgcluster some-name-required ++ mktemp + local LAST_OUT=/tmp/tmp.nMWgU8vhDt ++ mktemp + local LAST_ERR=/tmp/tmp.oSHnk70whT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete perconapgcluster some-name-required + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nMWgU8vhDt perconapgcluster.pg.percona.com "some-name-required" deleted + cat /tmp/tmp.oSHnk70whT + rm /tmp/tmp.nMWgU8vhDt /tmp/tmp.oSHnk70whT + return 0 + sleep 30 + cluster=some-name-disabled + spinup_pgcluster some-name-disabled /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml '' '' disabled + local cluster=some-name-disabled + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=disabled + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=affinity-13957 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-disabled affinity-13957 + local cluster=some-name-disabled + local namespace=affinity-13957 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + yq w - metadata.name some-name-disabled-users-secret + kubectl_bin -n affinity-13957 apply -f - + yq w - stringData.some-name some-name_pass ++ mktemp + local LAST_OUT=/tmp/tmp.supJ37rhnK ++ mktemp + local LAST_ERR=/tmp/tmp.34LoxpS5b2 + local exit_status=0 + local timeout=4 + set +o errexit 
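# Note: the create_user_secrets step here renders the two documents of deploy/users-secret.yaml
# (the users secret and the pmm secret) with per-cluster names and the fixed test password, then
# applies them; a minimal sketch of what the trace shows:
yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml \
    | yq w - metadata.name some-name-disabled-users-secret \
    | yq w - stringData.some-name some-name_pass \
    | kubectl -n affinity-13957 apply -f -
yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml \
    | yq w - metadata.name some-name-disabled-pmm-secret \
    | kubectl -n affinity-13957 apply -f -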
++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.supJ37rhnK secret/some-name-disabled-users-secret created + cat /tmp/tmp.34LoxpS5b2 + rm /tmp/tmp.supJ37rhnK /tmp/tmp.34LoxpS5b2 + return 0 + yq w - metadata.name some-name-disabled-pmm-secret + kubectl_bin -n affinity-13957 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.SFA1PkUSBw ++ mktemp + local LAST_ERR=/tmp/tmp.2FQxN7KMJS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.SFA1PkUSBw secret/some-name-disabled-pmm-secret created + cat /tmp/tmp.2FQxN7KMJS + rm /tmp/tmp.SFA1PkUSBw /tmp/tmp.2FQxN7KMJS + return 0 + kubectl_bin -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.AHnY7JL8f1 ++ mktemp + local LAST_ERR=/tmp/tmp.YphbabZyxH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.AHnY7JL8f1 deployment.apps/pg-client unchanged + cat /tmp/tmp.YphbabZyxH + rm /tmp/tmp.AHnY7JL8f1 /tmp/tmp.YphbabZyxH + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml some-name-disabled false false false false disabled false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml + local name=some-name-disabled + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=disabled + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/cr.yaml metadata.annotations.current-primary some-name-disabled + yq w - metadata.labels.crunchy-pgha-scope some-name-disabled + yq w - metadata.labels.deployment-name some-name-disabled + yq w - spec.name some-name-disabled + yq w - spec.keepData false + yq w - metadata.name some-name-disabled + yq w - spec.database some-name-disabled + yq w - spec.clustername some-name-disabled + yq w - spec.user some-name + yq w - spec.secretsName some-name-disabled-users-secret + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.keepBackups false + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - metadata.labels.pg-cluster some-name-disabled + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - metadata.labels.name some-name-disabled + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + case ${podAntiAffinity} in + yq w - spec.backup.affinity.antiAffinityType disabled + yq w - spec.pgBouncer.affinity.antiAffinityType disabled + yq w /tmp/tmp.IIbhFo6FJI/cr.yaml 
spec.pgPrimary.affinity.antiAffinityType disabled + mv /tmp/tmp.IIbhFo6FJI/cr.podAffinity.yaml /tmp/tmp.IIbhFo6FJI/cr.yaml + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 ++ mktemp + local LAST_OUT=/tmp/tmp.nENzRhh7Fb ++ mktemp + local LAST_ERR=/tmp/tmp.H0o7yHnXHv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.IIbhFo6FJI/cr.yaml -n affinity-13957 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nENzRhh7Fb perconapgcluster.pg.percona.com/some-name-disabled created + cat /tmp/tmp.H0o7yHnXHv + rm /tmp/tmp.nENzRhh7Fb /tmp/tmp.H0o7yHnXHv + return 0 + wait_deployment some-name-disabled-backrest-shared-repo + local name=some-name-disabled-backrest-shared-repo + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name-disabled-backrest-shared-repo... + wait_deployment some-name-disabled + local name=some-name-disabled + local target_namespace=affinity-13957 + sleep 10 + set +o xtrace some-name-disabled......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-disabled-stanza-create false + local job=some-name-disabled-stanza-create + local mandatory=false + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' some-name-disabled-stanza-create+ retry=0 ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/some-name-disabled-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name-disabled + local job=backrest-backup-some-name-disabled + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' backrest-backup-some-name-disabled+ retry=0 ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
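
Most kubectl calls in this log go through the suite's kubectl_bin wrapper, which is what produces the recurring mktemp/LAST_OUT/LAST_ERR/"seq 0 2" pattern in the trace. A minimal sketch reconstructed from that pattern (the real helper also pins KUBECONFIG to the Jenkins sandbox file shown above; treat this as an illustration, not the suite's source):

    # Retry kubectl up to three times with a 0s/4s/8s backoff, buffering
    # stdout/stderr in temp files and echoing them after the attempt loop.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        set +o errexit
        for i in $(seq 0 2); do
            # the traced helper prefixes this with KUBECONFIG=<sandbox config>
            kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            if [[ ${exit_status} != 0 ]]; then
                sleep $((timeout * i))
            else
                break
            fi
        done
        set -o errexit
        cat "${LAST_OUT}"
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return ${exit_status}
    }
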
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
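
The dotted polling above is the suite's wait_job_completion helper waiting for the pgBackRest backup Job. A rough reconstruction from the conditions printed in the loop (the 600-iteration cap, one-second sleep and the "not found" check come straight from the trace; the error messages are placeholders, not output the suite actually emits):

    # Poll a Job every second until .status.succeeded is 1, giving up if the
    # Job disappears or after 600 iterations.
    wait_job_completion() {
        local job=$1
        local ns=${2:-affinity-13957}   # namespace used in this run
        local retry=0
        until [[ $(kubectl_bin -n "${ns}" get "job/${job}" \
            -o 'jsonpath={.status.succeeded}' 2>/dev/null) == "1" ]]; do
            if kubectl_bin -n "${ns}" get "job/${job}" \
                -o 'jsonpath={.metadata.name}' 2>&1 | grep -qio 'not found'; then
                echo "job/${job} not found" >&2
                return 1
            fi
            sleep 1
            echo -n .
            let retry+=1
            if [ "${retry}" -ge 600 ]; then
                echo "job/${job} did not complete in time" >&2
                return 1
            fi
        done
        echo
    }
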
.+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/backrest-backup-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-disabled + cluster_name=some-name-disabled + wait_cluster_status some-name-disabled 'pgcluster Initialized' + local cluster_name=some-name-disabled + local 'target_status=pgcluster Initialized' + local target_namespace=affinity-13957 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xTOaA2kEzP +++ mktemp ++ local LAST_ERR=/tmp/tmp.ShDmvRTtsQ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.xTOaA2kEzP ++ cat /tmp/tmp.ShDmvRTtsQ ++ rm /tmp/tmp.xTOaA2kEzP /tmp/tmp.ShDmvRTtsQ ++ return 0 + status_query='kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gQImyWgwpc ++++ mktemp +++ 
local LAST_ERR=/tmp/tmp.xY7fXvGj4n +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.gQImyWgwpc +++ cat /tmp/tmp.xY7fXvGj4n +++ rm /tmp/tmp.gQImyWgwpc /tmp/tmp.xY7fXvGj4n +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qowdILw90w ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oVGlq29m4r +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qowdILw90w +++ cat /tmp/tmp.oVGlq29m4r +++ rm /tmp/tmp.qowdILw90w /tmp/tmp.oVGlq29m4r +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mkhMkFHguN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5cKMOa6A7J +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.mkhMkFHguN +++ cat /tmp/tmp.5cKMOa6A7J +++ rm /tmp/tmp.mkhMkFHguN /tmp/tmp.5cKMOa6A7J +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WEdla1XTzo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.novUi01qnc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.WEdla1XTzo +++ cat /tmp/tmp.novUi01qnc +++ rm /tmp/tmp.WEdla1XTzo /tmp/tmp.novUi01qnc +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Rox4Nn02D1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.PfUbEY3WKj ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 
0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.Rox4Nn02D1 ++++ cat /tmp/tmp.PfUbEY3WKj ++++ rm /tmp/tmp.Rox4Nn02D1 /tmp/tmp.PfUbEY3WKj ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wYRBY81BPf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xcS99kUsDN +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.wYRBY81BPf +++ cat /tmp/tmp.xcS99kUsDN +++ rm /tmp/tmp.wYRBY81BPf /tmp/tmp.xcS99kUsDN +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-disabled.affinity-13957 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-disabled.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXWhM6FoY7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LoNEvoOmYv ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.XXWhM6FoY7 ++ cat /tmp/tmp.LoNEvoOmYv ++ rm /tmp/tmp.XXWhM6FoY7 /tmp/tmp.LoNEvoOmYv ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.IVuQQEqe3Y ++ mktemp + local LAST_ERR=/tmp/tmp.D3KdLsF22r + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IVuQQEqe3Y + cat /tmp/tmp.D3KdLsF22r + rm /tmp/tmp.IVuQQEqe3Y /tmp/tmp.D3KdLsF22r + return 0 ++ get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RmjC93q9l7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QOTWbOiFQX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ 
[[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.RmjC93q9l7 +++ cat /tmp/tmp.QOTWbOiFQX +++ rm /tmp/tmp.RmjC93q9l7 /tmp/tmp.QOTWbOiFQX +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zQ3hH52fA6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BzrVK8Vj9D +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.zQ3hH52fA6 +++ cat /tmp/tmp.BzrVK8Vj9D +++ rm /tmp/tmp.zQ3hH52fA6 /tmp/tmp.BzrVK8Vj9D +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z3oDB6IblD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GZ0R0ohhX4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.z3oDB6IblD +++ cat /tmp/tmp.GZ0R0ohhX4 +++ rm /tmp/tmp.z3oDB6IblD /tmp/tmp.GZ0R0ohhX4 +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.reUlDSZnWn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.obROGpnLJ7 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.reUlDSZnWn ++++ cat /tmp/tmp.obROGpnLJ7 ++++ rm /tmp/tmp.reUlDSZnWn /tmp/tmp.obROGpnLJ7 ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iMLQRbw3WZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RYT1CvrT7S +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iMLQRbw3WZ +++ cat /tmp/tmp.RYT1CvrT7S +++ rm /tmp/tmp.iMLQRbw3WZ /tmp/tmp.RYT1CvrT7S +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-disabled.affinity-13957 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name-disabled.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PcWZQnURtr +++ mktemp ++ local LAST_ERR=/tmp/tmp.p981mIfX0Y ++ local exit_status=0 ++ local 
timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.PcWZQnURtr ++ cat /tmp/tmp.p981mIfX0Y ++ rm /tmp/tmp.PcWZQnURtr /tmp/tmp.p981mIfX0Y ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.LOxHcIah09 ++ mktemp + local LAST_ERR=/tmp/tmp.GPRmuabPXM + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LOxHcIah09 + cat /tmp/tmp.GPRmuabPXM + rm /tmp/tmp.LOxHcIah09 /tmp/tmp.GPRmuabPXM + return 0 ++ get_psql_user_pass postgres some-name-disabled ++ local user=postgres ++ local cluster=some-name-disabled ++ local namespace=affinity-13957 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PNQ6hSSh2W ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S76G8CHipx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.PNQ6hSSh2W +++ cat /tmp/tmp.S76G8CHipx +++ rm /tmp/tmp.PNQ6hSSh2W /tmp/tmp.S76G8CHipx +++ return 0 ++ [[ xsome-name-disabled == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0t5K7Mqb5z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oww0MF4xQE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name-disabled -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0t5K7Mqb5z +++ cat /tmp/tmp.oww0MF4xQE +++ rm /tmp/tmp.0t5K7Mqb5z /tmp/tmp.oww0MF4xQE +++ return 0 +++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zxSX7ieuoK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Gw9VohlG00 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.zxSX7ieuoK +++ cat /tmp/tmp.Gw9VohlG00 +++ rm /tmp/tmp.zxSX7ieuoK 
/tmp/tmp.Gw9VohlG00 +++ return 0 ++ [[ xsome-name-disabled-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EECzPAw9s2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lGivpVh2iZ ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n affinity-13957 get perconapgcluster/some-name-disabled -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.EECzPAw9s2 ++++ cat /tmp/tmp.lGivpVh2iZ ++++ rm /tmp/tmp.EECzPAw9s2 /tmp/tmp.lGivpVh2iZ ++++ return 0 +++ kubectl_bin -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oOme0AkIQK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vtuZKGXHsx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n affinity-13957 get secret/some-name-disabled-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.oOme0AkIQK +++ cat /tmp/tmp.vtuZKGXHsx +++ rm /tmp/tmp.oOme0AkIQK /tmp/tmp.vtuZKGXHsx +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-disabled.affinity-13957 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name-disabled.affinity-13957 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hsYmJYUcEI +++ mktemp ++ local LAST_ERR=/tmp/tmp.6KhsgFPSWK ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n affinity-13957 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.hsYmJYUcEI ++ cat /tmp/tmp.6KhsgFPSWK ++ rm /tmp/tmp.hsYmJYUcEI /tmp/tmp.6KhsgFPSWK ++ return 0 + local client_container=pg-client-76df9df876-9bp66 + kubectl_bin exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.XRuWn586R6 ++ mktemp + local LAST_ERR=/tmp/tmp.66sglMcxyw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-9bp66 -n affinity-13957 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-disabled.affinity-13957.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XRuWn586R6 + cat /tmp/tmp.66sglMcxyw + rm /tmp/tmp.XRuWn586R6 
/tmp/tmp.66sglMcxyw + return 0 + sleep 10 + compare_kubectl deployment/some-name-disabled + local resource=deployment/some-name-disabled + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-disabled.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-disabled + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' ++ mktemp + yq d - metadata.resourceVersion + yq d - '**.uid' + yq d - metadata.deletionTimestamp + local LAST_OUT=/tmp/tmp.VfYPP2G42N + yq d - '**.image' + yq d - '**.dataSource' + yq d - '**.procMount' ++ mktemp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + local LAST_ERR=/tmp/tmp.rhbZiywZ8P + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.storageClassName' + yq d - '**.nodePort' + yq d - '**.finalizers' + yq d - spec.volumeMode + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - spec.nodeName + yq d - '**.volumeName' + yq d - '**.(name==COMMAND_OPTS)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**."percona.com/*"' + yq d - '**.enableServiceLinks' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.creationTimestamp' + yq d - status + yq d - '**.(name==suffix)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.controller-uid' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.clusterIP' ++ seq 0 2 + yq d - '**.(name==PODNAME)' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name-disabled + yq d - '**.clusterIPs' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.preemptionPolicy' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.ipFamilyPolicy' + yq d - '**.ipFamilies' + yq d - '**.internalTrafficPolicy' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - metadata.selfLink + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.VfYPP2G42N + cat /tmp/tmp.rhbZiywZ8P + rm /tmp/tmp.VfYPP2G42N /tmp/tmp.rhbZiywZ8P + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-disabled.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled.yml + compare_kubectl deployment/some-name-disabled-backrest-shared-repo + local resource=deployment/some-name-disabled-backrest-shared-repo + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-disabled-backrest-shared-repo.yml + local new_result=/tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled-backrest-shared-repo.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name-disabled-backrest-shared-repo + yq d - metadata.managedFields ++ mktemp + yq d - '**.namespace' + yq d - metadata.resourceVersion + yq d - metadata.selfLink + 
local LAST_OUT=/tmp/tmp.DyG9s7Xdjh + yq d - '**.uid' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.procMount' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**."batch.kubernetes.io/controller-uid"' ++ mktemp + local LAST_ERR=/tmp/tmp.RY3yOGhSXn + local exit_status=0 + yq d - '**.imagePullSecrets' + local timeout=4 + set +o errexit + yq d - '**.storageClassName' + yq d - '**.volumeName' + yq d - '**.finalizers' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**.controller-uid' + yq d - '**.healthCheckNodePort' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - status + yq d - '**.(name==COMMAND_OPTS)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.nodePort' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.internalTrafficPolicy' + yq d - '**."percona.com/*"' + yq d - '**.(name==suffix)' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==PODNAME)' ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name-disabled-backrest-shared-repo + yq d - '**.ipFamilyPolicy' + yq d - '**.enableServiceLinks' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - '**.image' + yq d - '**.ipFamilies' + yq d - '**.creationTimestamp' + yq d - metadata.deletionTimestamp + yq d - '**.creationTimestamp' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.DyG9s7Xdjh + cat /tmp/tmp.RY3yOGhSXn + rm /tmp/tmp.DyG9s7Xdjh /tmp/tmp.RY3yOGhSXn + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled-backrest-shared-repo.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/deployment_some-name-disabled-backrest-shared-repo.yml /tmp/tmp.IIbhFo6FJI/deployment_some-name-disabled-backrest-shared-repo.yml + compare_kubectl job/backrest-backup-some-name-disabled + local resource=job/backrest-backup-some-name-disabled + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name-disabled.yml + local new_result=/tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml job/backrest-backup-some-name-disabled + yq d - metadata.managedFields ++ mktemp + yq d - '**.uid' + yq d - metadata.deletionTimestamp + local LAST_OUT=/tmp/tmp.h3Do701nO8 + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - metadata.resourceVersion + yq d - metadata.selfLink + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' ++ mktemp + yq d - '**.creationTimestamp' + yq d - '**.namespace' + local 
LAST_ERR=/tmp/tmp.cNFSnQuQaF + yq d - '**.volumeName' + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.dataSource' + yq d - '**.clusterIPs' + yq d - '**.procMount' ++ seq 0 2 + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - spec.nodeName + for i in '$(seq 0 2)' + yq d - '**.clusterIP' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml job/backrest-backup-some-name-disabled + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - spec.volumeMode + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==PODNAME)' + yq d - '**.healthCheckNodePort' + yq d - '**.nodePort' + yq d - '**.internalTrafficPolicy' + yq d - '**.imagePullSecrets' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.enableServiceLinks' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.(name==suffix)' + yq d - status + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilies' + yq d - '**.ipFamilyPolicy' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.h3Do701nO8 + cat /tmp/tmp.cNFSnQuQaF + rm /tmp/tmp.h3Do701nO8 /tmp/tmp.cNFSnQuQaF + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml kind ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml metadata.annotations + [[ x{} == \x ]] ++ yq r /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml metadata.annotations + [[ x{} == \x\{\} ]] + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml metadata.annotations + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml metadata.generation + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml spec.completionMode + yq d -i /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml spec.suspend + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/affinity/compare/job_backrest-backup-some-name-disabled.yml /tmp/tmp.IIbhFo6FJI/job_backrest-backup-some-name-disabled.yml + create_backup some-name-disabled disabled + local cluster=some-name-disabled + local bckp_prefix=disabled + local bckp_type=full + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/backup.yml metadata.labels.pg-cluster some-name-disabled + yq w - spec.name disabled-some-name-disabled + yq w - spec.namespace affinity-13957 + yq w - spec.parameters.job-name disabled-some-name-disabled + yq w - spec.parameters.pg-cluster some-name-disabled + yq w --style=single -- - spec.parameters.backrest-opts --type=full + kubectl_bin apply -f - + yq w - metadata.name disabled-some-name-disabled ++ mktemp + local LAST_OUT=/tmp/tmp.obin9rYjxT ++ kubectl_bin get pods --selector=name=some-name-disabled-backrest-shared-repo,pg-cluster=some-name-disabled -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.iforEnW2zX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 ++ local LAST_OUT=/tmp/tmp.ZYBaQsW0dX + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ mktemp + kubectl apply -f - ++ local LAST_ERR=/tmp/tmp.7M9y2RrC28 ++ local exit_status=0 ++ local 
timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=some-name-disabled-backrest-shared-repo,pg-cluster=some-name-disabled -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ZYBaQsW0dX ++ cat /tmp/tmp.7M9y2RrC28 ++ rm /tmp/tmp.ZYBaQsW0dX /tmp/tmp.7M9y2RrC28 ++ return 0 + yq w - spec.parameters.podname some-name-disabled-backrest-shared-repo-6d97b67686-mddxz + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.obin9rYjxT pgtask.pg.percona.com/disabled-some-name-disabled created + cat /tmp/tmp.iforEnW2zX + rm /tmp/tmp.obin9rYjxT /tmp/tmp.iforEnW2zX + return 0 + sleep 10 + wait_job_completion disabled-some-name-disabled + local job=disabled-some-name-disabled + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' disabled-some-name-disabled+ retry=0 ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
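
The backup this loop is waiting on was submitted by create_backup, which patches the e2e backup.yml template with yq (v3 "yq w" syntax, as traced) and applies it as a pgtask that triggers a full pgBackRest backup. A sketch assembled from the yq writes visible above; test_dir and namespace are placeholder variables for the paths and namespace of this run, not names confirmed by the trace:

    # Build and apply a pgtask for an on-demand pgBackRest backup.
    create_backup() {
        local cluster=$1 bckp_prefix=$2 bckp_type=${3:-full}
        local repo_pod
        # pod running the backrest shared repo, used as the backup target
        repo_pod=$(kubectl_bin get pods \
            --selector=name=${cluster}-backrest-shared-repo,pg-cluster=${cluster} \
            -o 'jsonpath={.items[].metadata.name}')
        yq w "${test_dir}/conf/backup.yml" metadata.labels.pg-cluster "${cluster}" \
            | yq w - metadata.name "${bckp_prefix}-${cluster}" \
            | yq w - spec.name "${bckp_prefix}-${cluster}" \
            | yq w - spec.namespace "${namespace}" \
            | yq w - spec.parameters.job-name "${bckp_prefix}-${cluster}" \
            | yq w - spec.parameters.pg-cluster "${cluster}" \
            | yq w - spec.parameters.podname "${repo_pod}" \
            | yq w --style=single -- - spec.parameters.backrest-opts "--type=${bckp_type}" \
            | kubectl_bin apply -f -
    }
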
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/disabled-some-name-disabled -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + destroy affinity-13957 + grep -v level=info + grep -v level=debug ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + grep -v 'Getting tasks for pod' +++ mktemp + grep -v 'Getting pods from source' + tee /tmp/tmp.IIbhFo6FJI/operator.log + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u ++ local LAST_OUT=/tmp/tmp.6eZhuJQ3D3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.h32oJuJVsl ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.6eZhuJQ3D3 ++ cat /tmp/tmp.h32oJuJVsl ++ rm /tmp/tmp.6eZhuJQ3D3 /tmp/tmp.h32oJuJVsl ++ return 0 + kubectl_bin logs postgres-operator-6df7cc4897-7f5nv -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.TWXAX0ZuKb ++ mktemp + local LAST_ERR=/tmp/tmp.kGjlI0g6U2 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl logs postgres-operator-6df7cc4897-7f5nv -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.TWXAX0ZuKb + cat /tmp/tmp.kGjlI0g6U2 + rm /tmp/tmp.TWXAX0ZuKb /tmp/tmp.kGjlI0g6U2 + return 0 ] { } }, ] } }, { } }, ] { } }, { } ] } { } }, ] } ] { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ], { } }, }], { } }, }], } ], } }, } }, } }, { } }, }, 2024/04/23 16:10:53 INF 1 (localhost:4150) connecting to nsqd 2024/04/23 16:11:41 INF 2 (localhost:4150) connecting to nsqd 2024/04/23 16:11:42 INF 3 (localhost:4150) connecting to nsqd 2024/04/23 16:11:49 INF 4 (localhost:4150) connecting to nsqd 2024/04/23 16:12:18 INF 5 (localhost:4150) connecting to nsqd 2024/04/23 16:12:18 INF 6 (localhost:4150) connecting to nsqd 2024/04/23 16:12:18 INF 7 (localhost:4150) connecting to nsqd 2024/04/23 16:13:41 INF 8 (localhost:4150) connecting to nsqd 2024/04/23 16:14:14 INF 9 (localhost:4150) connecting to nsqd 2024/04/23 16:14:15 INF 10 (localhost:4150) connecting to nsqd 2024/04/23 16:14:39 INF 11 (localhost:4150) connecting to nsqd 2024/04/23 16:14:44 INF 12 (localhost:4150) connecting to nsqd 2024/04/23 16:14:51 INF 13 (localhost:4150) connecting to nsqd 2024/04/23 16:15:41 INF 14 (localhost:4150) connecting to nsqd 2024/04/23 16:15:42 INF 15 (localhost:4150) connecting to nsqd 2024/04/23 16:15:48 INF 16 (localhost:4150) connecting to nsqd 2024/04/23 16:16:16 INF 17 (localhost:4150) connecting to nsqd 2024/04/23 16:16:16 INF 18 (localhost:4150) connecting to nsqd 2024/04/23 16:16:17 INF 19 (localhost:4150) connecting to nsqd 2024/04/23 16:17:30 INF 20 (localhost:4150) connecting to nsqd 2024/04/23 16:17:51 INF 21 (localhost:4150) connecting to nsqd 2024/04/23 16:17:52 INF 22 (localhost:4150) connecting to nsqd 2024/04/23 16:18:12 INF 23 (localhost:4150) connecting to nsqd 2024/04/23 16:18:17 INF 24 (localhost:4150) connecting to nsqd 2024/04/23 16:18:27 INF 25 (localhost:4150) connecting to nsqd 2024/04/23 
16:19:16 INF 26 (localhost:4150) connecting to nsqd 2024/04/23 16:19:17 INF 27 (localhost:4150) connecting to nsqd 2024/04/23 16:19:23 INF 28 (localhost:4150) connecting to nsqd 2024/04/23 16:19:52 INF 29 (localhost:4150) connecting to nsqd 2024/04/23 16:19:52 INF 30 (localhost:4150) connecting to nsqd 2024/04/23 16:19:53 INF 31 (localhost:4150) connecting to nsqd 2024/04/23 16:20:58 INF 32 (localhost:4150) connecting to nsqd 2024/04/23 16:21:20 INF 33 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { }, "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "some-name-disabled-pgbr-repo" "claimName": "some-name-pgbr-repo" "claimName": "some-name-required-pgbr-repo" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "200m", "cpu": "500m", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgha-scope": "some-name-disabled","pgo-version": "1.6.0","name": "some-name-disabled","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled","pgouser": "admin" "defaultMode": 288 "defaultMode": 288, "defaultMode": 288, "defaultMode": 420, "deployment-name": "some-name" "deployment-name": "some-name-disabled" "deployment-name": "some-name-disabled-repl1" "deployment-name": "some-name-disabled-repl2" "deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name" "deployment-name": "some-name-repl1" "deployment-name": "some-name-repl2" "deployment-name": "some-name-required" "deployment-name": "some-name-required-repl1" "deployment-name": "some-name-required-repl2" "deployment-name": "some-name-required-repl2","pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0423 16:10:17.524784 1 shared_informer.go:240] Waiting for caches to sync for namespace I0423 16:10:17.625446 1 shared_informer.go:247] Caches are synced for namespace I0423 16:10:17.772405 1 shared_informer.go:240] Waiting for caches to sync for affinity-13957 I0423 16:10:17.873240 1 shared_informer.go:247] Caches are synced for affinity-13957 "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "image": "perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-rmdata", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" 
"key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "tls.crt", "key": "tls.key", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "256Mi" "memory": "48Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgconf/tls", "mountPath": "/pgconf/tls/pgbouncer", "mountPath": "/pgconf/tls-replication", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "name": "backrest", "name": "backrest-backup-some-name", "name": "backrest-backup-some-name-disabled", "name": "backrest-backup-some-name-required", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "cluster1-replication-ssl-keypair", "name": "cluster1-ssl-ca" "name": "cluster1-ssl-ca" "name": "cluster1-ssl-keypair" "name": "cluster1-ssl-keypair" "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "disabled-some-name-disabled", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "IS_BACKUP", "name": "IS_REPLICA", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "PG_CLUSTER", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": 
"PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_SCOPE", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "preferred-some-name", "name": "primary-volume" "name": "primary-volume", "name": "REMOVE_BACKUP", "name": "REMOVE_DATA", "name": "REPLICA_NAME", "name": "report", "name": "required-some-name-required", "name": "rmdata", "name": "root-volume" "name": "root-volume", "name": "some-name", "name": "some-name", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo" "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-disabled", "name": "some-name-disabled", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo" "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-backrest-shared-repo", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer", "name": "some-name-disabled-pgbouncer-cm" "name": "some-name-disabled-pgbouncer-secret", "name": "some-name-disabled-pgbouncer-secret", "name": "some-name-disabled-pgbr-repo", "name": "some-name-disabled-pgha-config", "name": "some-name-disabled-pgha-config", "name": "some-name-disabled-repl1", "name": "some-name-disabled-repl1", "name": "some-name-disabled-repl2", "name": "some-name-disabled-repl2", "name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl1","pgo-version": "1.6.0" "name": "some-name-disabled-stanza-create", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer-cm" "name": "some-name-pgbouncer-secret", "name": "some-name-pgbouncer-secret", "name": "some-name-pgbr-repo", "name": "some-name-pgha-config", "name": "some-name-pgha-config", "name": "some-name-repl1", "name": "some-name-repl1", "name": "some-name-repl2", "name": "some-name-repl2", "name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl2","pgo-version": "1.6.0" "name": "some-name-required", "name": "some-name-required", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo" "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-backrest-shared-repo", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer", "name": "some-name-required-pgbouncer-cm" "name": "some-name-required-pgbouncer-secret", "name": "some-name-required-pgbouncer-secret", "name": "some-name-required-pgbr-repo", "name": "some-name-required-pgha-config", "name": "some-name-required-pgha-config", "name": "some-name-required-repl1", "name": "some-name-required-repl1", "name": "some-name-required-repl2", "name": "some-name-required-repl2", "name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl2","pgo-version": "1.6.0" "name": 
"some-name-required-rmdata-bovj", "name": "some-name-required-rmdata-bovj", "name": "some-name-required-stanza-create", "name": "some-name-rmdata-rhlw", "name": "some-name-rmdata-rhlw", "name": "some-name-stanza-create", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tls-pgbouncer" "name": "tls-pgbouncer", "name": "tls-replication" "name": "tls-replication", "name": "tls-server" "name": "tls-server", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "path": "tls-replication.crt" "path": "tls-replication.key" "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"some-name"} "persistentVolumeClaim":{"claimName":"some-name-disabled"} "persistentVolumeClaim":{"claimName":"some-name-disabled-repl1"} "persistentVolumeClaim":{"claimName":"some-name-disabled-repl2"} "persistentVolumeClaim":{"claimName":"some-name-repl1"} "persistentVolumeClaim":{"claimName":"some-name-repl2"} "persistentVolumeClaim":{"claimName":"some-name-required"} "persistentVolumeClaim":{"claimName":"some-name-required-repl1"} "persistentVolumeClaim":{"claimName":"some-name-required-repl2"} "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name" "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled" "pg-cluster": "some-name-disabled", "pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl2","pgo-version": "1.6.0","name": "some-name-disabled-replica" "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pg-cluster": "some-name-required" "pg-cluster": "some-name-required", "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgouser": "admin","crunchy-pgha-scope": "some-name-required","pgo-version": "1.6.0","name": "some-name-required","pg-cluster": "some-name-required","deployment-name": "some-name-required" "pgo-version": "1.6.0","name": "some-name-disabled","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled","pgouser": "admin","crunchy-pgha-scope": "some-name-disabled" "pgo-version": "1.6.0","name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl1" "pgo-version": "1.6.0","name": "some-name-disabled-replica","pg-cluster": "some-name-disabled","deployment-name": "some-name-disabled-repl2" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl1" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": 
"some-name","deployment-name": "some-name-repl1" "pgo-version": "1.6.0","name": "some-name-replica","pg-cluster": "some-name","deployment-name": "some-name-repl2" "pgo-version": "1.6.0","name": "some-name-required","pg-cluster": "some-name-required","deployment-name": "some-name-required","pgouser": "admin","crunchy-pgha-scope": "some-name-required" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl1" "pgo-version": "1.6.0","name": "some-name-required-replica","pg-cluster": "some-name-required","deployment-name": "some-name-required-repl1" "pg-pod-anti-affinity": "disabled", "pg-pod-anti-affinity": "disabled", "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "required", "pg-pod-anti-affinity": "required", "pgrmdata": "true", "pgrmdata": "true", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 2022, "ports": [{ "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "replicas": 3, "requests": { "requests": { "require" "required", "requiredDuringSchedulingIgnoredDuringExecution": [ "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-disabled-backrest-repo-config" "secretName": "some-name-disabled-backrest-repo-config", "secretName": "some-name-disabled-backrest-repo-config" "secretName": "some-name-disabled-backrest-repo-config", "secretName": "some-name-disabled-postgres-secret" "secretName": "some-name-disabled-primaryuser-secret" "secretName": "some-name-disabled-some-name-secret" "secretName": "some-name-postgres-secret" "secretName": "some-name-primaryuser-secret" "secretName": "some-name-required-backrest-repo-config" "secretName": "some-name-required-backrest-repo-config", "secretName": "some-name-required-backrest-repo-config" "secretName": "some-name-required-backrest-repo-config", "secretName": "some-name-required-postgres-secret" "secretName": "some-name-required-primaryuser-secret" "secretName": "some-name-required-some-name-secret" "secretName": "some-name-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "serviceAccountName": "pgo-target", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-disabled-backrest-shared-repo", "service-name": "some-name-disabled-backrest-shared-repo", "service-name": 
"some-name-disabled-pgbouncer", "service-name": "some-name-disabled-pgbouncer", "service-name": "some-name-disabled-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-required-backrest-shared-repo", "service-name": "some-name-required-backrest-shared-repo", "service-name": "some-name-required-pgbouncer", "service-name": "some-name-required-pgbouncer", "service-name": "some-name-required-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "2Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "some-name" "some-name-required" "sources": [ "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1Gi" "strategy": { "targetPort": 2022, "template": { time="2024-04-23T16:10:16Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0 time="2024-04-23T16:10:58Z" level=error msg="get deployment some-name-repl1: deployments.apps \"some-name-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:11:03Z" level=error msg="get deployment some-name-repl2: deployments.apps \"some-name-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:11:41Z" level=error msg="Error completing workflow some-name-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T16:11:41Z" level=error msg="pgtasks.pg.percona.com \"some-name-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T16:12:18Z" level=warning msg="crv1 pgreplica some-name-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T16:12:18Z" level=warning msg="crv1 pgreplica some-name-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T16:14:15Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:17Z" level=error msg="ConfigMap Controller: cannot find pgcluster for 
configMap some-name-pgha-config (namespace affinity-13957),ignoring" func="github.com/percona/percona-postgresql-operator/internal/controller/configmap.(*Controller).handleConfigMapSync()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/configmap/synchandler.go:55" version=1.6.0 time="2024-04-23T16:14:17Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:18Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:19Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:20Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:21Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:22Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:28Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:32Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:33Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:36Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:38Z" level=error 
msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:39Z" level=error msg="pgclusters.pg.percona.com \"some-name\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:14:44Z" level=error msg="jobs.batch \"some-name-rmdata-rhlw\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T16:14:56Z" level=error msg="get deployment some-name-required-repl1: deployments.apps \"some-name-required-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:15:01Z" level=error msg="get deployment some-name-required-repl2: deployments.apps \"some-name-required-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:15:41Z" level=error msg="Error completing workflow some-name-required-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T16:15:41Z" level=error msg="pgtasks.pg.percona.com \"some-name-required-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T16:16:17Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T16:16:17Z" level=error msg="pod some-name-required-repl1-5c9dc47dbc-zkrls does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T16:16:17Z" level=error msg="pod some-name-required-repl1-5c9dc47dbc-zkrls does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T16:16:17Z" level=error msg="pod some-name-required-repl2-6f5d4c77c-kzsdr does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" 
version=1.6.0 time="2024-04-23T16:16:17Z" level=error msg="pod some-name-required-repl2-6f5d4c77c-kzsdr does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T16:16:17Z" level=warning msg="crv1 pgreplica some-name-required-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T16:16:17Z" level=warning msg="crv1 pgreplica some-name-required-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T16:16:18Z" level=error func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).GetLocalConfigFromCluster()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:332" version=1.6.0 time="2024-04-23T16:16:18Z" level=error msg="pod some-name-required-repl1-5c9dc47dbc-zkrls does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T16:16:18Z" level=error msg="pod some-name-required-repl1-5c9dc47dbc-zkrls does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T16:16:18Z" level=error msg="pod some-name-required-repl2-6f5d4c77c-kzsdr does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/kubeapi.ExecToPodThroughAPI()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/kubeapi/exec.go:76" version=1.6.0 time="2024-04-23T16:16:18Z" level=error msg="pod some-name-required-repl2-6f5d4c77c-kzsdr does not have a host assigned" func="github.com/percona/percona-postgresql-operator/internal/operator/config.(*LocalDB).Sync.func2()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/config/localdb.go:173" version=1.6.0 time="2024-04-23T16:17:52Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:17:54Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:17:55Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" 
file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:17:56Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:17:57Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:00Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:05Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:09Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:11Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:12Z" level=error msg="pgclusters.pg.percona.com \"some-name-required\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0 time="2024-04-23T16:18:17Z" level=error msg="jobs.batch \"some-name-required-rmdata-bovj\" not found" func="github.com/percona/percona-postgresql-operator/internal/controller/job.(*Controller).handleRMDataUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/job/rmdatahandler.go:61" version=1.6.0 time="2024-04-23T16:18:31Z" level=error msg="get deployment some-name-disabled-repl1: deployments.apps \"some-name-disabled-repl1\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:18:36Z" level=error msg="get deployment some-name-disabled-repl2: deployments.apps \"some-name-disabled-repl2\" not found" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica.CreateReplicaResource()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgreplica/pgreplica.go:40" version=1.6.0 time="2024-04-23T16:19:16Z" level=error 
msg="Error completing workflow some-name-disabled-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T16:19:16Z" level=error msg="pgtasks.pg.percona.com \"some-name-disabled-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 time="2024-04-23T16:19:53Z" level=warning msg="crv1 pgreplica some-name-disabled-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 time="2024-04-23T16:19:53Z" level=warning msg="crv1 pgreplica some-name-disabled-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-disabled-backrest-shared-repo" "value": "/backrestrepo/some-name-disabled-backrest-shared-repo" "value": "/backrestrepo/some-name-required-backrest-shared-repo" "value": "/backrestrepo/some-name-required-backrest-shared-repo" "value": "backup" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.55.249.17 --db-path=/pgdata/some-name" "value": " --db-host=10.55.249.21 --db-path=/pgdata/some-name-required" "value": " --db-host=10.55.249.25 --db-path=/pgdata/some-name-disabled" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/some-name" "value": "/pgdata/some-name" "value": "/pgdata/some-name-disabled" "value": "/pgdata/some-name-disabled" "value": "/pgdata/some-name-disabled-repl1" "value": "/pgdata/some-name-disabled-repl1" "value": "/pgdata/some-name-disabled-repl2" "value": "/pgdata/some-name-disabled-repl2" "value": "/pgdata/some-name-repl1" "value": "/pgdata/some-name-repl1" "value": "/pgdata/some-name-repl2" "value": "/pgdata/some-name-repl2" "value": "/pgdata/some-name-required" "value": "/pgdata/some-name-required" "value": "/pgdata/some-name-required-repl1" "value": "/pgdata/some-name-required-repl1" "value": "/pgdata/some-name-required-repl2" "value": "/pgdata/some-name-required-repl2" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "some-name" "value": "some-name-backrest-shared-repo" "value": "some-name-backrest-shared-repo-6f8f947dbc-fbx5j" "value": "some-name-disabled" "value": "some-name-disabled-backrest-shared-repo" "value": "some-name-disabled-backrest-shared-repo-6d97b67686-mddxz" "value": "some-name-required" "value": "some-name-required-backrest-shared-repo" "value": "some-name-required-backrest-shared-repo-8558c9b6fc-ss9q6" "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" 
"value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.55.249.17 --db-path=/pgdata/some-name" "value": "--type=full --db-host=10.55.249.21 --db-path=/pgdata/some-name-required" "value": "--type=full --db-host=10.55.249.25 --db-path=/pgdata/some-name-disabled" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sDkZe5kO5W ++ mktemp + local LAST_ERR=/tmp/tmp.TmTjTuJUV0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.IIbhFo6FJI/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.sDkZe5kO5W + cat /tmp/tmp.TmTjTuJUV0 Error from server (NotFound): error when deleting "/tmp/tmp.IIbhFo6FJI/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.IIbhFo6FJI/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.IIbhFo6FJI/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.IIbhFo6FJI/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.IIbhFo6FJI/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.sDkZe5kO5W /tmp/tmp.TmTjTuJUV0 + return 1 + true + yq w -d4 /tmp/tmp.IIbhFo6FJI/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.IIbhFo6FJI/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jeGzFtx9AF ++ mktemp + local LAST_ERR=/tmp/tmp.9H6pRXbChI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.IIbhFo6FJI/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.jeGzFtx9AF serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.9H6pRXbChI + rm /tmp/tmp.jeGzFtx9AF /tmp/tmp.9H6pRXbChI + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=affinity-13957 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin 
-n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n affinity-13957 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.IIbhFo6FJI/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.H99NnpanHv ++ mktemp + local LAST_ERR=/tmp/tmp.ixNAmFaWAi + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.IIbhFo6FJI/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.H99NnpanHv serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.ixNAmFaWAi + rm /tmp/tmp.H99NnpanHv /tmp/tmp.ixNAmFaWAi + return 0 + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + kubectl_bin get ns + xargs kubectl delete ns --grace-period=0 --force=true ++ mktemp + local LAST_OUT=/tmp/tmp.trsf8hbZt6 ++ mktemp + local LAST_ERR=/tmp/tmp.An94VP3kcg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.trsf8hbZt6 + cat /tmp/tmp.An94VP3kcg + rm /tmp/tmp.trsf8hbZt6 /tmp/tmp.An94VP3kcg + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
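The teardown above rewrites the deployer container's first environment variable to uninstall with yq, apparently producing operator-destroy.yaml, re-applies it, and then polls the pgo-deploy job once per second (the printed dots) until .status.succeeded reaches 1. A minimal sketch of that wait loop, assuming the namespace from this run and the same 600-attempt budget as the trace (the real helper also greps the output for "not found" to handle a job that never existed):

    wait_job_completion() {
        local job=$1 ns=${2:-affinity-13957} retry=0
        # Poll until the job reports one successful completion.
        until [[ "$(kubectl -n "$ns" get "job/$job" \
                    -o 'jsonpath={.status.succeeded}' 2>/dev/null)" == "1" ]]; do
            retry=$((retry + 1))
            if [ "$retry" -ge 600 ]; then
                echo "job/$job did not complete in time" >&2
                return 1
            fi
            sleep 1
            echo -n .
        done
        echo
    }

    wait_job_completion pgo-deploy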
namespace "affinity-13957" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.qL42N8nw2M ++ mktemp + local LAST_ERR=/tmp/tmp.1gwj0Ylzdg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.qL42N8nw2M + cat /tmp/tmp.1gwj0Ylzdg Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.qL42N8nw2M /tmp/tmp.1gwj0Ylzdg + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.R6JQGaUkD1 ++ mktemp + local LAST_ERR=/tmp/tmp.HXLLvzmm2N + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.R6JQGaUkD1 + cat /tmp/tmp.HXLLvzmm2N Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.R6JQGaUkD1 /tmp/tmp.HXLLvzmm2N + return 1 + true + rm -rf /tmp/tmp.IIbhFo6FJI