++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/operator-self-healing.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/operator-self-healing.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.15.1 ++ '[' v3 == v2 ']' + main + create_namespace operator-self-healing-30315 + local namespace=operator-self-healing-30315 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + awk '{print$1}' + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.2UWPYdZlws ++ mktemp + local LAST_OUT=/tmp/tmp.mQXdcIV6FP + local LAST_ERR=/tmp/tmp.XQS7LKevbA + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.I853ANMRxA + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl get ns + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.2UWPYdZlws + cat /tmp/tmp.XQS7LKevbA + rm /tmp/tmp.2UWPYdZlws /tmp/tmp.XQS7LKevbA + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.mQXdcIV6FP + cat /tmp/tmp.I853ANMRxA Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.mQXdcIV6FP /tmp/tmp.I853ANMRxA + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.zNyYaMePxR ++ mktemp + local LAST_ERR=/tmp/tmp.22U4odWMuX + local exit_status=0 + local 
timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.zNyYaMePxR + cat /tmp/tmp.22U4odWMuX Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.zNyYaMePxR /tmp/tmp.22U4odWMuX + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace operator-self-healing-30315 ++ mktemp + local LAST_OUT=/tmp/tmp.Xi71gTgpPy ++ mktemp + local LAST_ERR=/tmp/tmp.59PsD9eFNS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace operator-self-healing-30315 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace operator-self-healing-30315 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace operator-self-healing-30315 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.Xi71gTgpPy + cat /tmp/tmp.59PsD9eFNS Error from server (NotFound): namespaces "operator-self-healing-30315" not found + rm /tmp/tmp.Xi71gTgpPy /tmp/tmp.59PsD9eFNS + return 1 + : + wait_for_delete namespace/operator-self-healing-30315 + local res=namespace/operator-self-healing-30315 + set +o xtrace namespace/operator-self-healing-30315 - Error from server (NotFound): namespaces "operator-self-healing-30315" not found + kubectl_bin create namespace operator-self-healing-30315 ++ mktemp + local LAST_OUT=/tmp/tmp.0LGxesfv4S ++ mktemp + local LAST_ERR=/tmp/tmp.4xZd9vKUwW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl create namespace 
operator-self-healing-30315 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0LGxesfv4S namespace/operator-self-healing-30315 created + cat /tmp/tmp.4xZd9vKUwW + rm /tmp/tmp.0LGxesfv4S /tmp/tmp.4xZd9vKUwW + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.ddmOxh1rJg +++ mktemp ++ local LAST_ERR=/tmp/tmp.DSuDBJrdYU ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ddmOxh1rJg ++ cat /tmp/tmp.DSuDBJrdYU ++ rm /tmp/tmp.ddmOxh1rJg /tmp/tmp.DSuDBJrdYU ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox --namespace=operator-self-healing-30315 ++ mktemp + local LAST_OUT=/tmp/tmp.70HctYHneI ++ mktemp + local LAST_ERR=/tmp/tmp.Q4sHAvlRHx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox --namespace=operator-self-healing-30315 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.70HctYHneI Context "gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox" modified. + cat /tmp/tmp.Q4sHAvlRHx + rm /tmp/tmp.70HctYHneI /tmp/tmp.Q4sHAvlRHx + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=operator-self-healing-30315 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml disabled install operator-self-healing-30315 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=operator-self-healing-30315 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "operator-self-healing-30315"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "operator-self-healing-30315"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + cat /dev/fd/63 - ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' ++ echo 'crunchy_debug: "true"' + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-776-0087ef129"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml metadata.namespace operator-self-healing-30315 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-776-0087ef129-pgo-deployer + 
yq w -d3 - 'subjects[0].namespace' operator-self-healing-30315 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.qmqO8LWxl0/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "operator-self-healing-30315" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-776-0087ef129" pgo_operator_namespace: "operator-self-healing-30315"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.qmqO8LWxl0/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Bkzd3WDKiL ++ mktemp + local LAST_ERR=/tmp/tmp.qhNVzoTZfQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.qmqO8LWxl0/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Bkzd3WDKiL serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.qhNVzoTZfQ + rm /tmp/tmp.Bkzd3WDKiL /tmp/tmp.qhNVzoTZfQ + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=operator-self-healing-30315 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
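# --- editor's annotation (not part of the original trace) ---------------------
# Nearly every command above runs through the kubectl_bin wrapper, whose
# expansion is what produces the repeated mktemp/LAST_OUT/LAST_ERR/exit_status
# lines. A minimal sketch of that retry pattern, reconstructed only from this
# trace; the body below is the editor's assumption, not the test suite's actual
# implementation:
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    # three attempts; the trace shows sleeps of 0s, 4s and 8s between failures
    for i in $(seq 0 2); do
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        [[ $exit_status == 0 ]] && break
        sleep $((timeout * i))
    done
    set -o errexit
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
# (the real helper also pins KUBECONFIG to the per-run sandbox file seen above)
# ------------------------------------------------------------------------------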
.+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
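# --- editor's annotation (not part of the original trace) ---------------------
# The dots printed in this stretch come from wait_job_completion polling the
# pgo-deploy Job once per second until .status.succeeded becomes 1. A hedged
# reconstruction of that loop, based only on the expansions visible in this
# trace; the NAMESPACE fallback and the exact return codes are assumptions:
wait_job_completion() {
    local job=$1 mandatory=${2:-true} ns=${3:-$NAMESPACE} retry=0
    while true; do
        # a Job that no longer exists only fails the wait when it was mandatory
        if kubectl_bin -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' 2>&1 | grep -qio 'not found'; then
            [[ $mandatory == true ]] && return 1 || return 0
        fi
        [[ $(kubectl_bin -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}') == 1 ]] && return 0
        sleep 1
        echo -n .
        retry=$((retry + 1))
        # give up after 600 one-second iterations (~10 minutes)
        [ "$retry" -ge 600 ] && return 1
    done
}
# ------------------------------------------------------------------------------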
.+ let retry+=1 + '[' 35 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 36 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.o410Cyx2Pp ++ mktemp + local LAST_ERR=/tmp/tmp.0negvoLCX3 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.o410Cyx2Pp serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.0negvoLCX3 + rm /tmp/tmp.o410Cyx2Pp /tmp/tmp.0negvoLCX3 + return 0 ++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ciPYsLLitO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9k4V0OBLBG +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ciPYsLLitO +++ cat /tmp/tmp.9k4V0OBLBG +++ rm /tmp/tmp.ciPYsLLitO /tmp/tmp.9k4V0OBLBG +++ return 0 ++ echo postgres-operator-5fd78658f6-gwjfs + wait_pod postgres-operator-5fd78658f6-gwjfs + local pod=postgres-operator-5fd78658f6-gwjfs + local ns= + set +o xtrace postgres-operator-5fd78658f6-gwjfstrue + [[ -n '' ]] + deploy_chaos_mesh operator-self-healing-30315 + local chaos_mesh_ns=operator-self-healing-30315 + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local old_cm_namespace=NAMESPACE + '[' NAMESPACE '!=' NAMESPACE ']' + helm repo remove chaos-mesh "chaos-mesh" has been removed from your repositories + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" has been added to your repositories + case ${PLATFORM} in + version_gt 1.19 ++ echo '1.26 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=operator-self-healing-30315 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version v2.0.4 --set clusterScoped=false --set controllerManager.targetNamespace=operator-self-healing-30315 NAME: chaos-mesh LAST DEPLOYED: Fri May 24 20:54:44 2024 NAMESPACE: operator-self-healing-30315 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. 
Make sure chaos-mesh components are running kubectl get pods --namespace operator-self-healing-30315 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + cluster=some-name-30046 + spinup_pgcluster some-name-30046 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/cr.yaml + local cluster=some-name-30046 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + local namespace=operator-self-healing-30315 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-30046 operator-self-healing-30315 + local cluster=some-name-30046 + local namespace=operator-self-healing-30315 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml + yq w - stringData.some-name some-name_pass + yq w - metadata.name some-name-30046-users-secret + kubectl_bin -n operator-self-healing-30315 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yIVlwCYlhx ++ mktemp + local LAST_ERR=/tmp/tmp.iwdjYsu6w0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yIVlwCYlhx secret/some-name-30046-users-secret created + cat /tmp/tmp.iwdjYsu6w0 + rm /tmp/tmp.yIVlwCYlhx /tmp/tmp.iwdjYsu6w0 + return 0 + yq w - metadata.name some-name-30046-pmm-secret + kubectl_bin -n operator-self-healing-30315 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.LyYrl4RuQR ++ mktemp + local LAST_ERR=/tmp/tmp.kx0qOkpHBj + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LyYrl4RuQR secret/some-name-30046-pmm-secret created + cat /tmp/tmp.kx0qOkpHBj + rm /tmp/tmp.LyYrl4RuQR /tmp/tmp.kx0qOkpHBj + return 0 + kubectl_bin -n operator-self-healing-30315 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.0jYtzEr7DV ++ mktemp + local LAST_ERR=/tmp/tmp.RWh7sXBnqW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0jYtzEr7DV deployment.apps/pg-client created + cat /tmp/tmp.RWh7sXBnqW + rm /tmp/tmp.0jYtzEr7DV /tmp/tmp.RWh7sXBnqW + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/cr.yaml some-name-30046 false false false false false false false 
false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/cr.yaml + local name=some-name-30046 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/cr.yaml metadata.annotations.current-primary some-name-30046 + yq w - metadata.labels.deployment-name some-name-30046 + yq w - metadata.labels.name some-name-30046 + yq w - spec.keepBackups false + yq w - spec.name some-name-30046 + yq w - spec.keepData false + yq w - spec.clustername some-name-30046 + yq w - spec.database some-name-30046 + yq w - spec.user some-name + yq w - metadata.name some-name-30046 + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - metadata.labels.pg-cluster some-name-30046 + yq w - metadata.labels.crunchy-pgha-scope some-name-30046 + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.secretsName some-name-30046-users-secret + yq w - spec.pmm.image perconalab/pmm-client:2.41.2 + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.qmqO8LWxl0/cr.yaml -n operator-self-healing-30315 ++ mktemp + local LAST_OUT=/tmp/tmp.qim308xDyy ++ mktemp + local LAST_ERR=/tmp/tmp.Nho8gPm2Ev + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.qmqO8LWxl0/cr.yaml -n operator-self-healing-30315 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qim308xDyy perconapgcluster.pg.percona.com/some-name-30046 created + cat /tmp/tmp.Nho8gPm2Ev + rm /tmp/tmp.qim308xDyy /tmp/tmp.Nho8gPm2Ev + return 0 + wait_deployment some-name-30046-backrest-shared-repo + local name=some-name-30046-backrest-shared-repo + local target_namespace=operator-self-healing-30315 + sleep 10 + set +o xtrace some-name-30046-backrest-shared-repo..... + wait_deployment some-name-30046 + local name=some-name-30046 + local target_namespace=operator-self-healing-30315 + sleep 10 + set +o xtrace some-name-30046........ + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-30046-stanza-create false + local job=some-name-30046-stanza-create + local mandatory=false + local ns=operator-self-healing-30315 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' some-name-30046-stanza-create+ retry=0 ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
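# --- editor's annotation (not part of the original trace) ---------------------
# apply_cluster, expanded a little further up, derives the test CR from
# deploy/cr.yaml with a long chain of `yq w` edits (cluster name, images,
# secrets name, and so on) before applying it. A condensed, editor-written
# sketch of that pipeline; it repeats only a handful of the fields set above
# and pipes straight to kubectl instead of writing the intermediate cr.yaml:
apply_cluster_sketch() {
    local path=$1 name=$2 namespace=$3
    yq w "$path" metadata.name "$name" \
        | yq w - metadata.annotations.current-primary "$name" \
        | yq w - metadata.labels.pg-cluster "$name" \
        | yq w - spec.clustername "$name" \
        | yq w - spec.database "$name" \
        | yq w - spec.secretsName "${name}-users-secret" \
        | kubectl_bin apply -n "$namespace" -f -
}
# ------------------------------------------------------------------------------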
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/some-name-30046-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-some-name-30046 + local job=backrest-backup-some-name-30046 + local mandatory=true + local ns=operator-self-healing-30315 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 get job/backrest-backup-some-name-30046 -o 'jsonpath={.metadata.name}' backrest-backup-some-name-30046+ retry=0 ++ kubectl_bin -n operator-self-healing-30315 get job/backrest-backup-some-name-30046 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/backrest-backup-some-name-30046 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-30046 + cluster_name=some-name-30046 + wait_cluster_status some-name-30046 'pgcluster Initialized' + local cluster_name=some-name-30046 + local 'target_status=pgcluster Initialized' + local target_namespace=operator-self-healing-30315 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.USbtEqhuml +++ mktemp ++ local LAST_ERR=/tmp/tmp.T8d9MpUhgx ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.USbtEqhuml ++ cat /tmp/tmp.T8d9MpUhgx ++ rm /tmp/tmp.USbtEqhuml /tmp/tmp.T8d9MpUhgx ++ return 0 + status_query='kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval 
kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5EtP6HD94d ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ozzd7FoT4A +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.5EtP6HD94d +++ cat /tmp/tmp.ozzd7FoT4A +++ rm /tmp/tmp.5EtP6HD94d /tmp/tmp.ozzd7FoT4A +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name-30046 ++ local user=postgres ++ local cluster=some-name-30046 ++ local namespace=operator-self-healing-30315 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uL7S6B79TY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0PnFzoEK05 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.uL7S6B79TY +++ cat /tmp/tmp.0PnFzoEK05 +++ rm /tmp/tmp.uL7S6B79TY /tmp/tmp.0PnFzoEK05 +++ return 0 ++ [[ xsome-name-30046 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kj2ta9JjlV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1FKxKHjYeR +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kj2ta9JjlV +++ cat /tmp/tmp.1FKxKHjYeR +++ rm /tmp/tmp.kj2ta9JjlV /tmp/tmp.1FKxKHjYeR +++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DRMSOOgWg8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6OcMG6LuLo +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.DRMSOOgWg8 +++ cat /tmp/tmp.6OcMG6LuLo +++ rm /tmp/tmp.DRMSOOgWg8 /tmp/tmp.6OcMG6LuLo +++ return 0 ++ [[ xsome-name-30046-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ 
local LAST_OUT=/tmp/tmp.p1N15tco3c +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BdSl61DaSy ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.p1N15tco3c ++++ cat /tmp/tmp.BdSl61DaSy ++++ rm /tmp/tmp.p1N15tco3c /tmp/tmp.BdSl61DaSy ++++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0GXgIyMiKQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vdlgbXa4EF +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.0GXgIyMiKQ +++ cat /tmp/tmp.vdlgbXa4EF +++ rm /tmp/tmp.0GXgIyMiKQ /tmp/tmp.vdlgbXa4EF +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pFHyujMrEd +++ mktemp ++ local LAST_ERR=/tmp/tmp.nrdOkE0sZg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.pFHyujMrEd ++ cat /tmp/tmp.nrdOkE0sZg ++ rm /tmp/tmp.pFHyujMrEd /tmp/tmp.nrdOkE0sZg ++ return 0 + local client_container=pg-client-76df9df876-hx7fl + kubectl_bin exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.sm9jPvzw23 ++ mktemp + local LAST_ERR=/tmp/tmp.MyVDpDAF2e + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.sm9jPvzw23 + cat /tmp/tmp.MyVDpDAF2e + rm /tmp/tmp.sm9jPvzw23 /tmp/tmp.MyVDpDAF2e + return 0 ++ get_psql_user_pass postgres some-name-30046 ++ local user=postgres ++ local cluster=some-name-30046 ++ local 
namespace=operator-self-healing-30315 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qFqdFheEbq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8E7YFup3kQ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qFqdFheEbq +++ cat /tmp/tmp.8E7YFup3kQ +++ rm /tmp/tmp.qFqdFheEbq /tmp/tmp.8E7YFup3kQ +++ return 0 ++ [[ xsome-name-30046 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aAwghyRwcO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QrFrdVrQt2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.aAwghyRwcO +++ cat /tmp/tmp.QrFrdVrQt2 +++ rm /tmp/tmp.aAwghyRwcO /tmp/tmp.QrFrdVrQt2 +++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tqWJW15Txb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sm9DIfc7db +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tqWJW15Txb +++ cat /tmp/tmp.sm9DIfc7db +++ rm /tmp/tmp.tqWJW15Txb /tmp/tmp.sm9DIfc7db +++ return 0 ++ [[ xsome-name-30046-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QTo2F6WrOQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ihCkYC5tYx ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.QTo2F6WrOQ ++++ cat /tmp/tmp.ihCkYC5tYx ++++ rm /tmp/tmp.QTo2F6WrOQ /tmp/tmp.ihCkYC5tYx ++++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g8gvtgH1fQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YzSXLF43W9 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.g8gvtgH1fQ +++ cat /tmp/tmp.YzSXLF43W9 +++ rm /tmp/tmp.g8gvtgH1fQ /tmp/tmp.YzSXLF43W9 +++ return 0 ++ echo 
postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DEN7ZlgL10 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RrItUx3rUO ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.DEN7ZlgL10 ++ cat /tmp/tmp.RrItUx3rUO ++ rm /tmp/tmp.DEN7ZlgL10 /tmp/tmp.RrItUx3rUO ++ return 0 + local client_container=pg-client-76df9df876-hx7fl + kubectl_bin exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.4DYIqkcxHw ++ mktemp + local LAST_ERR=/tmp/tmp.R8H4VSjR1N + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.4DYIqkcxHw + cat /tmp/tmp.R8H4VSjR1N + rm /tmp/tmp.4DYIqkcxHw /tmp/tmp.R8H4VSjR1N + return 0 ++ get_psql_user_pass postgres some-name-30046 ++ local user=postgres ++ local cluster=some-name-30046 ++ local namespace=operator-self-healing-30315 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.THVM1uS2VY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KhDPNN4y39 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.THVM1uS2VY +++ cat /tmp/tmp.KhDPNN4y39 +++ rm /tmp/tmp.THVM1uS2VY /tmp/tmp.KhDPNN4y39 +++ return 0 ++ [[ xsome-name-30046 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PZLLCYiZ1S ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZPzZnJUMY8 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name-30046 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.PZLLCYiZ1S +++ cat 
/tmp/tmp.ZPzZnJUMY8 +++ rm /tmp/tmp.PZLLCYiZ1S /tmp/tmp.ZPzZnJUMY8 +++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SOxkWyUa4s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jZLw3291I2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.SOxkWyUa4s +++ cat /tmp/tmp.jZLw3291I2 +++ rm /tmp/tmp.SOxkWyUa4s /tmp/tmp.jZLw3291I2 +++ return 0 ++ [[ xsome-name-30046-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tMKqfwCaWI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ce7DumyIZC ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n operator-self-healing-30315 get perconapgcluster/some-name-30046 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.tMKqfwCaWI ++++ cat /tmp/tmp.Ce7DumyIZC ++++ rm /tmp/tmp.tMKqfwCaWI /tmp/tmp.Ce7DumyIZC ++++ return 0 +++ kubectl_bin -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8yOllhFctZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ug5XgP7rip +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n operator-self-healing-30315 get secret/some-name-30046-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8yOllhFctZ +++ cat /tmp/tmp.ug5XgP7rip +++ rm /tmp/tmp.8yOllhFctZ /tmp/tmp.ug5XgP7rip +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name-30046.operator-self-healing-30315 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i7lzdOpSM4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LqPVLaHq46 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n operator-self-healing-30315 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.i7lzdOpSM4 ++ cat /tmp/tmp.LqPVLaHq46 ++ rm /tmp/tmp.i7lzdOpSM4 /tmp/tmp.LqPVLaHq46 ++ return 0 + local client_container=pg-client-76df9df876-hx7fl + kubectl_bin exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql 
-v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.yugoDq2QsB ++ mktemp + local LAST_ERR=/tmp/tmp.ohJpPSBC4N + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-hx7fl -n operator-self-healing-30315 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-30046.operator-self-healing-30315.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yugoDq2QsB + cat /tmp/tmp.ohJpPSBC4N + rm /tmp/tmp.yugoDq2QsB /tmp/tmp.ohJpPSBC4N + return 0 + sleep 10 ++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QXhk2o77cr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qbFLCs5L6P +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QXhk2o77cr +++ cat /tmp/tmp.qbFLCs5L6P +++ rm /tmp/tmp.QXhk2o77cr /tmp/tmp.qbFLCs5L6P +++ return 0 ++ echo postgres-operator-5fd78658f6-gwjfs + kill_pod postgres-operator-5fd78658f6-gwjfs + local pod=postgres-operator-5fd78658f6-gwjfs + yq w - metadata.name chaos-cluster-pod-kill-13217 + yq w - metadata.namespace operator-self-healing-30315 + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/chaos-pod-kill.yml ++ mktemp + yq w - 'spec.selector.pods.operator-self-healing-30315[0]' postgres-operator-5fd78658f6-gwjfs + local LAST_OUT=/tmp/tmp.xnvaDKo3IA ++ mktemp + local LAST_ERR=/tmp/tmp.XGRWKuSmZX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.xnvaDKo3IA podchaos.chaos-mesh.org/chaos-cluster-pod-kill-13217 created + cat /tmp/tmp.XGRWKuSmZX + rm /tmp/tmp.xnvaDKo3IA /tmp/tmp.XGRWKuSmZX + return 0 + sleep 5 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-30315 + sleep 10 + set +o xtrace postgres-operator + kubectl_bin patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' ++ mktemp + local LAST_OUT=/tmp/tmp.HHHlLjQDdB ++ mktemp + local LAST_ERR=/tmp/tmp.mf2ZqXgfsD + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.HHHlLjQDdB perconapgcluster.pg.percona.com/some-name-30046 patched + cat /tmp/tmp.mf2ZqXgfsD + rm /tmp/tmp.HHHlLjQDdB /tmp/tmp.mf2ZqXgfsD + return 0 + wait_deployments_removal some-name-30046-replica 0 + local name=some-name-30046-replica + local target_size=0 + set +o xtrace ......++ get_operator_pod +++ kubectl_bin get pods 
--selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tcIyEQvVkJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vRCpbJVHf0 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tcIyEQvVkJ +++ cat /tmp/tmp.vRCpbJVHf0 +++ rm /tmp/tmp.tcIyEQvVkJ /tmp/tmp.vRCpbJVHf0 +++ return 0 ++ echo postgres-operator-5fd78658f6-c56z8 + failure_pod postgres-operator-5fd78658f6-c56z8 + local pod=postgres-operator-5fd78658f6-c56z8 + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/chaos-pod-failure.yml + yq w - metadata.name chaos-cluster-pod-failure-24537 + yq w - metadata.namespace operator-self-healing-30315 + kubectl_bin apply -f - ++ mktemp + yq w - 'spec.selector.pods.operator-self-healing-30315[0]' postgres-operator-5fd78658f6-c56z8 + local LAST_OUT=/tmp/tmp.AxRpisZ6J8 ++ mktemp + local LAST_ERR=/tmp/tmp.QBfkYN9Z7H + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.AxRpisZ6J8 podchaos.chaos-mesh.org/chaos-cluster-pod-failure-24537 created + cat /tmp/tmp.QBfkYN9Z7H + rm /tmp/tmp.AxRpisZ6J8 /tmp/tmp.QBfkYN9Z7H + return 0 + sleep 10 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-30315 + sleep 10 + set +o xtrace postgres-operator........... + kubectl_bin patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":2}]' ++ mktemp + local LAST_OUT=/tmp/tmp.xXdgBAZx5o ++ mktemp + local LAST_ERR=/tmp/tmp.wXxuX9SXGB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":2}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.xXdgBAZx5o perconapgcluster.pg.percona.com/some-name-30046 patched + cat /tmp/tmp.wXxuX9SXGB + rm /tmp/tmp.xXdgBAZx5o /tmp/tmp.wXxuX9SXGB + return 0 + wait_deployments_removal some-name-30046-replica 2 + local name=some-name-30046-replica + local target_size=2 + set +o xtrace .++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Av2WyX7zit ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p6xPoPRdAl +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Av2WyX7zit +++ cat /tmp/tmp.p6xPoPRdAl +++ rm /tmp/tmp.Av2WyX7zit /tmp/tmp.p6xPoPRdAl +++ return 0 ++ echo postgres-operator-5fd78658f6-c56z8 + network_loss postgres-operator-5fd78658f6-c56z8 + local pod=postgres-operator-5fd78658f6-c56z8 + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/chaos-network-loss.yml + yq w - metadata.namespace operator-self-healing-30315 + yq w - 
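# NOTE (sketch, not part of the captured trace): the pod-failure experiment is
# rendered the same way from e2e-tests/conf/chaos-pod-failure.yml; judging by
# the trace only the template and the object name differ from the pod-kill
# case. A plausible shape for the applied object is sketched below (action and
# duration are assumptions; the selector and names come from the yq calls above):
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-cluster-pod-failure-24537
  namespace: operator-self-healing-30315
spec:
  action: pod-failure     # assumed: keep the pod unavailable for a while
  mode: one               # assumed
  duration: "60s"         # assumed
  selector:
    pods:
      operator-self-healing-30315:
        - postgres-operator-5fd78658f6-c56z8
EOF
# Afterwards hotStandby size is patched back to 2 to verify the operator
# reconciles again once the failure is lifted.
# (end of sketch; the captured trace continues below)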
'spec.selector.pods.operator-self-healing-30315[0]' postgres-operator-5fd78658f6-c56z8 + kubectl_bin apply -f - + yq w - metadata.name chaos-cluster-network-loss-16199 ++ mktemp + local LAST_OUT=/tmp/tmp.381wdAyGPv ++ mktemp + local LAST_ERR=/tmp/tmp.tatOAaHQuu + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.381wdAyGPv networkchaos.chaos-mesh.org/chaos-cluster-network-loss-16199 created + cat /tmp/tmp.tatOAaHQuu + rm /tmp/tmp.381wdAyGPv /tmp/tmp.tatOAaHQuu + return 0 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-30315 + sleep 10 + set +o xtrace postgres-operator + kubectl_bin patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' ++ mktemp + local LAST_OUT=/tmp/tmp.yXmzc9keyr ++ mktemp + local LAST_ERR=/tmp/tmp.SYmm1bBbeW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl patch perconapgcluster/some-name-30046 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yXmzc9keyr perconapgcluster.pg.percona.com/some-name-30046 patched + cat /tmp/tmp.SYmm1bBbeW + rm /tmp/tmp.yXmzc9keyr /tmp/tmp.SYmm1bBbeW + return 0 + wait_deployments_removal some-name-30046-replica 0 + local name=some-name-30046-replica + local target_size=0 + set +o xtrace .....................................+ kubectl delete NetworkChaos --all networkchaos.chaos-mesh.org "chaos-cluster-network-loss-16199" deleted + kubectl delete PodChaos --all podchaos.chaos-mesh.org "chaos-cluster-pod-failure-24537" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill-13217" deleted + sleep 5 + destroy_chaos_mesh operator-self-healing-30315 + local chaos_mesh_ns=operator-self-healing-30315 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + helm del chaos-mesh --namespace operator-self-healing-30315 release "chaos-mesh" uninstalled + destroy operator-self-healing-30315 + grep -v level=info ++ get_operator_pod + grep -v level=debug + grep -v 'Getting tasks for pod' + grep -v 'Getting pods from source' + tee /tmp/tmp.qmqO8LWxl0/operator.log + grep -v 'get backup status: Job.batch' +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + grep -v 'the object has been modified' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iYvRGF5XkB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.69tlGxnpTe +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iYvRGF5XkB +++ cat /tmp/tmp.69tlGxnpTe +++ rm /tmp/tmp.iYvRGF5XkB /tmp/tmp.69tlGxnpTe +++ return 0 ++ echo postgres-operator-5fd78658f6-c56z8 + kubectl_bin logs postgres-operator-5fd78658f6-c56z8 -c operator ++ mktemp + local 
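# NOTE (sketch, not part of the captured trace): network_loss renders
# e2e-tests/conf/chaos-network-loss.yml the same way and applies a NetworkChaos
# object against the operator pod. The spec is not in the log; a plausible
# Chaos Mesh v1alpha1 shape is sketched below (action, loss percentage and
# duration are assumptions). The "no route to host" and "http2: client
# connection lost" errors in the operator log dump further down are the visible
# effect of this experiment.
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-cluster-network-loss-16199
  namespace: operator-self-healing-30315
spec:
  action: loss
  mode: one               # assumed
  duration: "60s"         # assumed
  loss:
    loss: "100"           # assumed
    correlation: "100"    # assumed
  selector:
    pods:
      operator-self-healing-30315:
        - postgres-operator-5fd78658f6-c56z8
EOF
# Cleanup is done just above with `kubectl delete NetworkChaos --all`,
# `kubectl delete PodChaos --all` and `helm del chaos-mesh`. Note that destroy()
# pipes the operator log through several `grep -v` filters, a sed that strips
# timestamps, and `sort -u`, which is why the JSON fragments in the dump below
# appear alphabetically sorted rather than in execution order.
# (end of sketch; the captured trace continues below)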
LAST_OUT=/tmp/tmp.Pd1q13h9gp ++ mktemp + local LAST_ERR=/tmp/tmp.mr7t9Ouk9R + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl logs postgres-operator-5fd78658f6-c56z8 -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Pd1q13h9gp + cat /tmp/tmp.mr7t9Ouk9R + rm /tmp/tmp.Pd1q13h9gp /tmp/tmp.mr7t9Ouk9R + return 0 ] { } }, ] } }, { } }, ] { } }, { } ] } { } }, ] ] { } }, }, { { } }, ] ], { } }, }, { }, { }], ], } { } }, } ], } }, } }, } { } }, }, 2024/05/24 20:58:31 INF 1 (localhost:4150) connecting to nsqd 2024/05/24 20:58:32 INF 2 (localhost:4150) connecting to nsqd 2024/05/24 20:58:50 ERR 1 (localhost:4150) IO error - EOF 2024/05/24 20:58:50 ERR 2 (localhost:4150) IO error - EOF 2024/05/24 20:58:50 INF 1 exiting router 2024/05/24 20:58:50 INF 1 (localhost:4150) beginning close 2024/05/24 20:58:50 INF 1 (localhost:4150) breaking out of writeLoop 2024/05/24 20:58:50 INF 1 (localhost:4150) clean close complete 2024/05/24 20:58:50 INF 1 (localhost:4150) finished draining, cleanup exiting 2024/05/24 20:58:50 INF 1 (localhost:4150) readLoop exiting 2024/05/24 20:58:50 INF 1 (localhost:4150) writeLoop exiting 2024/05/24 20:58:50 INF 2 exiting router 2024/05/24 20:58:50 INF 2 (localhost:4150) beginning close 2024/05/24 20:58:50 INF 2 (localhost:4150) breaking out of writeLoop 2024/05/24 20:58:50 INF 2 (localhost:4150) clean close complete 2024/05/24 20:58:50 INF 2 (localhost:4150) finished draining, cleanup exiting 2024/05/24 20:58:50 INF 2 (localhost:4150) readLoop exiting 2024/05/24 20:58:50 INF 2 (localhost:4150) writeLoop exiting 2024/05/24 20:59:33 INF 3 (localhost:4150) connecting to nsqd 2024/05/24 20:59:34 INF 4 (localhost:4150) connecting to nsqd 2024/05/24 20:59:39 INF 5 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "v1", "apiVersion": "v1", "command": [ "configMap": { "containerName": "database", "containerPort": 5432, "containerPort": 8009, "containers": [ "cpu": "500m", "crunchydata" "defaultMode": 288, "defaultMode": 420, "deployment-name": "some-name-30046-repl1" "deployment-name": "some-name-30046-repl2" "deployment-name": "some-name-30046-repl2","pgo-version": "1.7.0","name": "some-name-30046-replica","pg-cluster": "some-name-30046" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { E0524 20:59:28.240895 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pgtask: failed to list *v1.Pgtask: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgtasks?resourceVersion=106345": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.240931 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pod: failed to list *v1.Pod: Get "https://10.99.224.1:443/api/v1/namespaces/operator-self-healing-30315/pods?resourceVersion=106590": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.240943 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Job: failed to list *v1.Job: Get "https://10.99.224.1:443/apis/batch/v1/namespaces/operator-self-healing-30315/jobs?resourceVersion=106355": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.241005 1 reflector.go:138] 
pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pgreplica: failed to list *v1.Pgreplica: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgreplicas?resourceVersion=106523": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.241029 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://10.99.224.1:443/api/v1/namespaces/operator-self-healing-30315/configmaps?resourceVersion=106587": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.241118 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pgcluster: failed to list *v1.Pgcluster: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgclusters?resourceVersion=106355": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.241126 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.PerconaPGCluster: failed to list *v1.PerconaPGCluster: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/perconapgclusters?resourceVersion=106481": dial tcp 10.99.224.1:443: connect: no route to host E0524 20:59:28.241231 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pgpolicy: failed to list *v1.Pgpolicy: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgpolicies?resourceVersion=106356": dial tcp 10.99.224.1:443: connect: no route to host "emptyDir": { "emptyDir": { "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { I0524 20:58:18.043738 1 shared_informer.go:240] Waiting for caches to sync for namespace I0524 20:58:18.144139 1 shared_informer.go:247] Caches are synced for namespace I0524 20:58:18.324406 1 shared_informer.go:240] Waiting for caches to sync for operator-self-healing-30315 I0524 20:58:18.424488 1 shared_informer.go:247] Caches are synced for operator-self-healing-30315 "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "imagePullPolicy": "IfNotPresent" "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "key": "config", "key": "pg-cluster", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "tls.crt", "key": "tls.key", "key": "vendor", "kind": "Deployment", "kind": "PersistentVolumeClaim", "labels": { "labels": { "labelSelector": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "memory": "256Mi" "metadata": { "metadata": { "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/pgconf", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgconf/tls", "mountPath": "/pgconf/tls-replication", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/tmp", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "cluster1-replication-ssl-keypair", "name": "cluster1-ssl-ca" "name": "cluster1-ssl-keypair" "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "LD_PRELOAD", "name": "MODE", "name": 
"NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "podinfo" "name": "podinfo", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "report", "name": "root-volume" "name": "root-volume", "name": "some-name-30046-pgha-config", "name": "some-name-30046-repl1", "name": "some-name-30046-repl1", "name": "some-name-30046-repl2", "name": "some-name-30046-repl2", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "tls-replication" "name": "tls-replication", "name": "tls-server" "name": "tls-server", "name": "tmp" "name": "tmp", "name": "user-volume" "name": "user-volume", "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "tls-replication.crt" "path": "tls-replication.key" "periodSeconds": 15, "persistentVolumeClaim":{"claimName":"some-name-30046-repl1"} "persistentVolumeClaim":{"claimName":"some-name-30046-repl2"} "pg-cluster": "some-name-30046" "pg-cluster": "some-name-30046", "pg-cluster": "some-name-30046","deployment-name": "some-name-30046-repl1","pgo-version": "1.7.0","name": "some-name-30046-replica" "pgo-pg-database": "true", "pgo-pg-database": "true", "pgo-version": "1.7.0","name": "some-name-30046-replica","pg-cluster": "some-name-30046","deployment-name": "some-name-30046-repl1" "pgo-version": "1.7.0","name": "some-name-30046-replica","pg-cluster": "some-name-30046","deployment-name": "some-name-30046-repl2" "pg-pod-anti-affinity": "preferred", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 1, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "rollingUpdate": { "secret": { "secret": { "secretName": "some-name-30046-backrest-repo-config" "secretName": "some-name-30046-backrest-repo-config", "secretName": "some-name-30046-postgres-secret" "secretName": 
"some-name-30046-primaryuser-secret" "secretName": "some-name-30046-some-name-secret" "securityContext": { "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "selector": { "serviceAccountName": "pgo-pg", "sizeLimit": "16Mi" "sizeLimit": "2Mi" "sizeLimit": "64Mi" "some-name-30046" "sources": [ "sources": [ "spec": { "spec": { "storage": "1Gi" "strategy": { "template": { time="2024-05-24T20:58:17Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.7.0 time="2024-05-24T20:58:19Z" level=error msg="create pgcluster resource: create pgcluster resource: pgclusters.pg.percona.com \"some-name-30046\" already exists" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onAdd()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:174" version=1.7.0 time="2024-05-24T20:58:31Z" level=warning msg="crv1 pgreplica some-name-30046-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T20:58:32Z" level=warning msg="crv1 pgreplica some-name-30046-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T20:59:21Z" level=error msg="Get \"https://10.99.224.1:443/apis/apps/v1/namespaces/operator-self-healing-30315/deployments?labelSelector=pg-cluster%3Dsome-name-30046%2Cpgo-pg-database\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/configmap.(*Controller).createPGHAConfigs()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/configmap/synchandler.go:90" version=1.7.0 time="2024-05-24T20:59:21Z" level=error msg="Get \"https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgclusters/some-name-30046\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.7.0 time="2024-05-24T20:59:21Z" level=error msg="operator is unable to reconcile RBAC resource: Get \"https://10.99.224.1:443/api/v1/namespaces/operator-self-healing-30315/serviceaccounts/postgres-operator\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/manager.(*ControllerManager).reconcileRBAC()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/manager/rbac.go:51" version=1.7.0 time="2024-05-24T20:59:21Z" level=error msg="update perconapgcluster: update pgcluster: update pgPrimary deployment: wait deployment: get deployment: Get \"https://10.99.224.1:443/apis/apps/v1/namespaces/operator-self-healing-30315/deployments/some-name-30046\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" 
file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:542" version=1.7.0 time="2024-05-24T20:59:33Z" level=warning msg="crv1 pgreplica some-name-30046-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 time="2024-05-24T20:59:33Z" level=warning msg="crv1 pgreplica some-name-30046-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.7.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-30046-backrest-shared-repo" "value": "crunchy-pgha-scope" "value": "db" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "/pgdata/some-name-30046-repl1" "value": "/pgdata/some-name-30046-repl1" "value": "/pgdata/some-name-30046-repl2" "value": "/pgdata/some-name-30046-repl2" "value": "posix" "value": "postgres" "values": [ "value": "some-name-30046" "value": "some-name-30046-backrest-shared-repo" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata", "vendor": "crunchydata", "volumeMounts": [{ "volumes": [{ W0524 20:59:21.164961 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Job ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.164969 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.164975 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.PerconaPGCluster ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.165390 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgtask ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.165406 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgcluster ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.165434 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgreplica ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.165442 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgpolicy ended 
with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:21.165469 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pod ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding W0524 20:59:28.240830 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pgtask: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgtasks?resourceVersion=106345": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.240830 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pod: Get "https://10.99.224.1:443/api/v1/namespaces/operator-self-healing-30315/pods?resourceVersion=106590": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.240918 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Job: Get "https://10.99.224.1:443/apis/batch/v1/namespaces/operator-self-healing-30315/jobs?resourceVersion=106355": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.240979 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pgreplica: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgreplicas?resourceVersion=106523": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.241007 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.ConfigMap: Get "https://10.99.224.1:443/api/v1/namespaces/operator-self-healing-30315/configmaps?resourceVersion=106587": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.241091 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.PerconaPGCluster: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/perconapgclusters?resourceVersion=106481": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.241094 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pgcluster: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgclusters?resourceVersion=106355": dial tcp 10.99.224.1:443: connect: no route to host W0524 20:59:28.241208 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pgpolicy: Get "https://10.99.224.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-30315/pgpolicies?resourceVersion=106356": dial tcp 10.99.224.1:443: connect: no route to host "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.n3C5wWHbxg ++ mktemp + local LAST_ERR=/tmp/tmp.TNxeRSOw0I + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + 
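# NOTE (sketch, not part of the captured trace): every kubectl call in this log
# goes through the harness's kubectl_bin wrapper. From the trace (mktemp for
# LAST_OUT/LAST_ERR, three attempts with sleep 0/4/8, cat and rm of the temp
# files, return of the last exit status) it can be reconstructed roughly as
# below; the real helper in the e2e-tests functions file may differ in detail.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    set +o errexit
    for i in $(seq 0 2); do
        KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox \
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        if [[ $exit_status == 0 ]]; then
            break                      # success: stop retrying
        fi
        sleep $((timeout * i))         # 0s, 4s, 8s between attempts
    done
    set -o errexit
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
# Callers that expect "NotFound" (the cleanup steps here) append `|| true` or
# `|| :`, which is why failed deletions do not abort the script.
# (end of sketch; the captured trace continues below)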
kubectl delete -f /tmp/tmp.qmqO8LWxl0/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.n3C5wWHbxg + cat /tmp/tmp.TNxeRSOw0I Error from server (NotFound): error when deleting "/tmp/tmp.qmqO8LWxl0/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.qmqO8LWxl0/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.qmqO8LWxl0/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.qmqO8LWxl0/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.qmqO8LWxl0/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.n3C5wWHbxg /tmp/tmp.TNxeRSOw0I + return 1 + true + yq w -d4 /tmp/tmp.qmqO8LWxl0/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.qmqO8LWxl0/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sBfbJx5xLH ++ mktemp + local LAST_ERR=/tmp/tmp.142EOsERrq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.qmqO8LWxl0/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.sBfbJx5xLH serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.142EOsERrq + rm /tmp/tmp.sBfbJx5xLH /tmp/tmp.142EOsERrq + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=operator-self-healing-30315 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-30315 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.qmqO8LWxl0/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.NPVQBwuAcj ++ mktemp + local LAST_ERR=/tmp/tmp.E4UyF2pAEG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.qmqO8LWxl0/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.NPVQBwuAcj serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.E4UyF2pAEG + rm /tmp/tmp.NPVQBwuAcj /tmp/tmp.E4UyF2pAEG + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' ++ mktemp + awk '{print$1}' + xargs kubectl delete ns --grace-period=0 --force=true + local LAST_OUT=/tmp/tmp.LhJmNB8lPg ++ mktemp + local LAST_ERR=/tmp/tmp.oYnxmsHjtz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LhJmNB8lPg + cat /tmp/tmp.oYnxmsHjtz + rm /tmp/tmp.LhJmNB8lPg /tmp/tmp.oYnxmsHjtz + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
namespace "operator-self-healing-30315" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.gpLAMcOqCG ++ mktemp + local LAST_ERR=/tmp/tmp.4ZkDSuaFKQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.gpLAMcOqCG + cat /tmp/tmp.4ZkDSuaFKQ Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.gpLAMcOqCG /tmp/tmp.4ZkDSuaFKQ + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.O8MvyeZIPe ++ mktemp + local LAST_ERR=/tmp/tmp.GP37x4mXDp + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.O8MvyeZIPe + cat /tmp/tmp.GP37x4mXDp Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.O8MvyeZIPe /tmp/tmp.GP37x4mXDp + return 1 + true + rm -rf /tmp/tmp.qmqO8LWxl0