++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/operator-self-healing.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/operator-self-healing.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + main + create_namespace operator-self-healing-28680 + local namespace=operator-self-healing-28680 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + awk '{print$1}' + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.ruCcHZ77hH ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ZpA7v6rTNr + local LAST_ERR=/tmp/tmp.UVMceSo7QS + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.XZkUzru6oo + local exit_status=0 + local timeout=4 + set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ruCcHZ77hH + cat /tmp/tmp.UVMceSo7QS + rm /tmp/tmp.ruCcHZ77hH /tmp/tmp.UVMceSo7QS + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.ZpA7v6rTNr + cat /tmp/tmp.XZkUzru6oo Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.ZpA7v6rTNr /tmp/tmp.XZkUzru6oo + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.Cnxijx73bW ++ mktemp + local LAST_ERR=/tmp/tmp.9EDRoqLsca + local exit_status=0 + local 
timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.Cnxijx73bW + cat /tmp/tmp.9EDRoqLsca Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.Cnxijx73bW /tmp/tmp.9EDRoqLsca + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace operator-self-healing-28680 ++ mktemp + local LAST_OUT=/tmp/tmp.FJy5RuWF0f ++ mktemp + local LAST_ERR=/tmp/tmp.oVOEU69mIW + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace operator-self-healing-28680 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace operator-self-healing-28680 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace operator-self-healing-28680 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.FJy5RuWF0f + cat /tmp/tmp.oVOEU69mIW Error from server (NotFound): namespaces "operator-self-healing-28680" not found + rm /tmp/tmp.FJy5RuWF0f /tmp/tmp.oVOEU69mIW + return 1 + : + wait_for_delete namespace/operator-self-healing-28680 + local res=namespace/operator-self-healing-28680 + set +o xtrace namespace/operator-self-healing-28680 - Error from server (NotFound): namespaces "operator-self-healing-28680" not found + kubectl_bin create namespace operator-self-healing-28680 ++ mktemp + local LAST_OUT=/tmp/tmp.3e0lTbNj2G ++ mktemp + local LAST_ERR=/tmp/tmp.gEoNkq9jo6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace 
operator-self-healing-28680 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.3e0lTbNj2G namespace/operator-self-healing-28680 created + cat /tmp/tmp.gEoNkq9jo6 + rm /tmp/tmp.3e0lTbNj2G /tmp/tmp.gEoNkq9jo6 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.22IapEeFFD +++ mktemp ++ local LAST_ERR=/tmp/tmp.2kgj8GiPTr ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.22IapEeFFD ++ cat /tmp/tmp.2kgj8GiPTr ++ rm /tmp/tmp.22IapEeFFD /tmp/tmp.2kgj8GiPTr ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=operator-self-healing-28680 ++ mktemp + local LAST_OUT=/tmp/tmp.OPDKl80uDd ++ mktemp + local LAST_ERR=/tmp/tmp.XqmdanKGtg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=operator-self-healing-28680 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.OPDKl80uDd Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. + cat /tmp/tmp.XqmdanKGtg + rm /tmp/tmp.OPDKl80uDd /tmp/tmp.XqmdanKGtg + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=operator-self-healing-28680 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install operator-self-healing-28680 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=operator-self-healing-28680 + local dont_send_telemetry=false + /usr/bin/sed -e 's#^namespace: .*#namespace: "operator-self-healing-28680"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "operator-self-healing-28680"#g' + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + cat /dev/fd/63 - ++ echo 'crunchy_debug: "true"' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f1 ++ cut -d: -f2 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w -d3 - 'subjects[0].namespace' operator-self-healing-28680 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w '-d*' 
/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace operator-self-healing-28680 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.KMBwDowh2k/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "operator-self-healing-28680" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "operator-self-healing-28680"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.KMBwDowh2k/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vewhc1MneL ++ mktemp + local LAST_ERR=/tmp/tmp.u4gkV5kNWN + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.KMBwDowh2k/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.vewhc1MneL serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.u4gkV5kNWN + rm /tmp/tmp.vewhc1MneL /tmp/tmp.u4gkV5kNWN + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=operator-self-healing-28680 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
..............................
.+ let retry+=1 + '[' 35 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 36 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.KMBwDowh2k/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.FMVdneOUhS ++ mktemp + local LAST_ERR=/tmp/tmp.PdFYg6IvxV + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.KMBwDowh2k/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.FMVdneOUhS serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.PdFYg6IvxV + rm /tmp/tmp.FMVdneOUhS /tmp/tmp.PdFYg6IvxV + return 0 ++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tUMNhQPRDC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R11DJSprtL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tUMNhQPRDC +++ cat /tmp/tmp.R11DJSprtL +++ rm /tmp/tmp.tUMNhQPRDC /tmp/tmp.R11DJSprtL +++ return 0 ++ echo postgres-operator-bb4cd6646-bpb6v + wait_pod postgres-operator-bb4cd6646-bpb6v + local pod=postgres-operator-bb4cd6646-bpb6v + local ns= + set +o xtrace postgres-operator-bb4cd6646-bpb6vtrue + [[ -n '' ]] + deploy_chaos_mesh operator-self-healing-28680 + local chaos_mesh_ns=operator-self-healing-28680 + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local old_cm_namespace=NAMESPACE + '[' NAMESPACE '!=' NAMESPACE ']' + helm repo remove chaos-mesh "chaos-mesh" has been removed from your repositories + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" has been added to your repositories + case ${PLATFORM} in + version_gt 1.19 ++ echo '1.26 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=operator-self-healing-28680 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version v2.0.4 --set clusterScoped=false --set controllerManager.targetNamespace=operator-self-healing-28680 NAME: chaos-mesh LAST DEPLOYED: Wed Apr 24 19:38:13 2024 NAMESPACE: operator-self-healing-28680 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. 
Make sure chaos-mesh components are running kubectl get pods --namespace operator-self-healing-28680 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + cluster=some-name-22160 + spinup_pgcluster some-name-22160 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local cluster=some-name-22160 + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=operator-self-healing-28680 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name-22160 operator-self-healing-28680 + local cluster=some-name-22160 + local namespace=operator-self-healing-28680 + yq w - metadata.name some-name-22160-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + kubectl_bin -n operator-self-healing-28680 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.S05dvuweGJ ++ mktemp + local LAST_ERR=/tmp/tmp.8PfS4ZyNsK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.S05dvuweGJ secret/some-name-22160-users-secret created + cat /tmp/tmp.8PfS4ZyNsK + rm /tmp/tmp.S05dvuweGJ /tmp/tmp.8PfS4ZyNsK + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - metadata.name some-name-22160-pmm-secret + kubectl_bin -n operator-self-healing-28680 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.7HsKzaFdBf ++ mktemp + local LAST_ERR=/tmp/tmp.GMU5xaV04x + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.7HsKzaFdBf secret/some-name-22160-pmm-secret created + cat /tmp/tmp.GMU5xaV04x + rm /tmp/tmp.7HsKzaFdBf /tmp/tmp.GMU5xaV04x + return 0 + kubectl_bin -n operator-self-healing-28680 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Nub5IfdGnk ++ mktemp + local LAST_ERR=/tmp/tmp.OCsyV0zjVZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Nub5IfdGnk deployment.apps/pg-client created + cat /tmp/tmp.OCsyV0zjVZ + rm /tmp/tmp.Nub5IfdGnk /tmp/tmp.OCsyV0zjVZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml some-name-22160 false false false false false false false 
false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml + local name=some-name-22160 + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/cr.yaml metadata.annotations.current-primary some-name-22160 + yq w - metadata.labels.deployment-name some-name-22160 + yq w - metadata.labels.name some-name-22160 + yq w - spec.database some-name-22160 + yq w - spec.keepData false + yq w - spec.keepBackups false + yq w - spec.clustername some-name-22160 + yq w - spec.secretsName some-name-22160-users-secret + yq w - spec.user some-name + yq w - spec.name some-name-22160 + yq w - metadata.name some-name-22160 + yq w - metadata.labels.pg-cluster some-name-22160 + yq w - metadata.labels.crunchy-pgha-scope some-name-22160 + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.KMBwDowh2k/cr.yaml -n operator-self-healing-28680 ++ mktemp + local LAST_OUT=/tmp/tmp.Khy0gPIYuH ++ mktemp + local LAST_ERR=/tmp/tmp.viotApXqTH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.KMBwDowh2k/cr.yaml -n operator-self-healing-28680 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Khy0gPIYuH perconapgcluster.pg.percona.com/some-name-22160 created + cat /tmp/tmp.viotApXqTH + rm /tmp/tmp.Khy0gPIYuH /tmp/tmp.viotApXqTH + return 0 + wait_deployment some-name-22160-backrest-shared-repo + local name=some-name-22160-backrest-shared-repo + local target_namespace=operator-self-healing-28680 + sleep 10 + set +o xtrace some-name-22160-backrest-shared-repo.. + wait_deployment some-name-22160 + local name=some-name-22160 + local target_namespace=operator-self-healing-28680 + sleep 10 + set +o xtrace some-name-22160........... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-22160-stanza-create false + local job=some-name-22160-stanza-create + local mandatory=false + local ns=operator-self-healing-28680 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.metadata.name}' some-name-22160-stanza-create+ retry=0 ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/some-name-22160-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ not found == \n\o\t\ \f\o\u\n\d ]] + echo + wait_job_completion backrest-backup-some-name-22160 + local job=backrest-backup-some-name-22160 + local mandatory=true + local ns=operator-self-healing-28680 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' backrest-backup-some-name-22160+ retry=0 ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/backrest-backup-some-name-22160 -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name-22160 + cluster_name=some-name-22160 + wait_cluster_status some-name-22160 'pgcluster Initialized' + local cluster_name=some-name-22160 + local 'target_status=pgcluster Initialized' + local target_namespace=operator-self-healing-28680 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fw0vN85m45 +++ mktemp ++ local LAST_ERR=/tmp/tmp.v96FZDWmI3 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Fw0vN85m45 ++ cat /tmp/tmp.v96FZDWmI3 ++ rm /tmp/tmp.Fw0vN85m45 /tmp/tmp.v96FZDWmI3 ++ return 0 + status_query='kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g1QPTTZHY8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7AMQ9EsfB5 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.g1QPTTZHY8 +++ cat /tmp/tmp.7AMQ9EsfB5 +++ rm /tmp/tmp.g1QPTTZHY8 /tmp/tmp.7AMQ9EsfB5 +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.EqtTeTPXn5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NH0iZzYzu8 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.EqtTeTPXn5 +++ cat /tmp/tmp.NH0iZzYzu8 +++ rm /tmp/tmp.EqtTeTPXn5 /tmp/tmp.NH0iZzYzu8 +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name-22160 ++ local user=postgres ++ local cluster=some-name-22160 ++ local namespace=operator-self-healing-28680 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WQT60y2vck ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OukuC2CWmY +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.WQT60y2vck +++ cat /tmp/tmp.OukuC2CWmY +++ rm /tmp/tmp.WQT60y2vck /tmp/tmp.OukuC2CWmY +++ return 0 ++ [[ xsome-name-22160 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GxRB7P0ffE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.57j6m737LC +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GxRB7P0ffE +++ cat /tmp/tmp.57j6m737LC +++ rm /tmp/tmp.GxRB7P0ffE /tmp/tmp.57j6m737LC +++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QNEFDFixVe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EQVRJBZ54r +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QNEFDFixVe +++ cat /tmp/tmp.EQVRJBZ54r +++ rm /tmp/tmp.QNEFDFixVe /tmp/tmp.EQVRJBZ54r +++ return 0 ++ [[ xsome-name-22160-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CdTPOCytaz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2qwZ9wL0B1 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n 
operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.CdTPOCytaz ++++ cat /tmp/tmp.2qwZ9wL0B1 ++++ rm /tmp/tmp.CdTPOCytaz /tmp/tmp.2qwZ9wL0B1 ++++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3a7Ditzu2J ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MkK4OJOJ6b +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.3a7Ditzu2J +++ cat /tmp/tmp.MkK4OJOJ6b +++ rm /tmp/tmp.3a7Ditzu2J /tmp/tmp.MkK4OJOJ6b +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WiizeWf1f4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.48HKMiFolA ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.WiizeWf1f4 ++ cat /tmp/tmp.48HKMiFolA ++ rm /tmp/tmp.WiizeWf1f4 /tmp/tmp.48HKMiFolA ++ return 0 + local client_container=pg-client-76df9df876-l2dkh + kubectl_bin exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.KiGvngLsyq ++ mktemp + local LAST_ERR=/tmp/tmp.aogtXIqHbK + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.KiGvngLsyq + cat /tmp/tmp.aogtXIqHbK + rm /tmp/tmp.KiGvngLsyq /tmp/tmp.aogtXIqHbK + return 0 ++ get_psql_user_pass postgres some-name-22160 ++ local user=postgres ++ local cluster=some-name-22160 ++ local namespace=operator-self-healing-28680 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LV7wjKg46n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2KXh4wuvvD +++ local 
exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.LV7wjKg46n +++ cat /tmp/tmp.2KXh4wuvvD +++ rm /tmp/tmp.LV7wjKg46n /tmp/tmp.2KXh4wuvvD +++ return 0 ++ [[ xsome-name-22160 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NlxL1v1ig2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WOjT88ZIpM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.NlxL1v1ig2 +++ cat /tmp/tmp.WOjT88ZIpM +++ rm /tmp/tmp.NlxL1v1ig2 /tmp/tmp.WOjT88ZIpM +++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.p13oPVelqJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NQI3H2TiVK +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.p13oPVelqJ +++ cat /tmp/tmp.NQI3H2TiVK +++ rm /tmp/tmp.p13oPVelqJ /tmp/tmp.NQI3H2TiVK +++ return 0 ++ [[ xsome-name-22160-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2A5HnNtiSZ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RCjEEQrxXq ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.2A5HnNtiSZ ++++ cat /tmp/tmp.RCjEEQrxXq ++++ rm /tmp/tmp.2A5HnNtiSZ /tmp/tmp.RCjEEQrxXq ++++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1vnSbnbxwX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aWsBaikCyD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1vnSbnbxwX +++ cat /tmp/tmp.aWsBaikCyD +++ rm /tmp/tmp.1vnSbnbxwX /tmp/tmp.aWsBaikCyD +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local 
uri=postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iLHQvv5Jxu +++ mktemp ++ local LAST_ERR=/tmp/tmp.i1w18f4hVB ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.iLHQvv5Jxu ++ cat /tmp/tmp.i1w18f4hVB ++ rm /tmp/tmp.iLHQvv5Jxu /tmp/tmp.i1w18f4hVB ++ return 0 + local client_container=pg-client-76df9df876-l2dkh + kubectl_bin exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.E9QD8c0UqX ++ mktemp + local LAST_ERR=/tmp/tmp.mKV5nUgsvf + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.E9QD8c0UqX + cat /tmp/tmp.mKV5nUgsvf + rm /tmp/tmp.E9QD8c0UqX /tmp/tmp.mKV5nUgsvf + return 0 ++ get_psql_user_pass postgres some-name-22160 ++ local user=postgres ++ local cluster=some-name-22160 ++ local namespace=operator-self-healing-28680 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UR3YbJE0cJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o6OGXgPKmj +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UR3YbJE0cJ +++ cat /tmp/tmp.o6OGXgPKmj +++ rm /tmp/tmp.UR3YbJE0cJ /tmp/tmp.o6OGXgPKmj +++ return 0 ++ [[ xsome-name-22160 == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HxGTASFAnA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c6dP6UUblH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name-22160 -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.HxGTASFAnA +++ cat /tmp/tmp.c6dP6UUblH +++ rm /tmp/tmp.HxGTASFAnA /tmp/tmp.c6dP6UUblH +++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.kVbGXwt6nZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.prFuTE2CzL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.kVbGXwt6nZ +++ cat /tmp/tmp.prFuTE2CzL +++ rm /tmp/tmp.kVbGXwt6nZ /tmp/tmp.prFuTE2CzL +++ return 0 ++ [[ xsome-name-22160-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.DbUgIVdl92 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.guwgxL1DdB ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n operator-self-healing-28680 get perconapgcluster/some-name-22160 -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.DbUgIVdl92 ++++ cat /tmp/tmp.guwgxL1DdB ++++ rm /tmp/tmp.DbUgIVdl92 /tmp/tmp.guwgxL1DdB ++++ return 0 +++ kubectl_bin -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UWts1p5ja1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p0KpajDHXC +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n operator-self-healing-28680 get secret/some-name-22160-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.UWts1p5ja1 +++ cat /tmp/tmp.p0KpajDHXC +++ rm /tmp/tmp.UWts1p5ja1 /tmp/tmp.p0KpajDHXC +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name-22160.operator-self-healing-28680 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dqqoda8BNv +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hz4rQ2psPM ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n operator-self-healing-28680 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Dqqoda8BNv ++ cat /tmp/tmp.Hz4rQ2psPM ++ rm /tmp/tmp.Dqqoda8BNv /tmp/tmp.Hz4rQ2psPM ++ return 0 + local client_container=pg-client-76df9df876-l2dkh + kubectl_bin exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.LvmgULIPIP ++ mktemp + local LAST_ERR=/tmp/tmp.4DgTFgWmhW + 
local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-l2dkh -n operator-self-healing-28680 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name-22160.operator-self-healing-28680.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.LvmgULIPIP + cat /tmp/tmp.4DgTFgWmhW + rm /tmp/tmp.LvmgULIPIP /tmp/tmp.4DgTFgWmhW + return 0 + sleep 10 ++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dMS12eCaQP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9NmlocIR4u +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.dMS12eCaQP +++ cat /tmp/tmp.9NmlocIR4u +++ rm /tmp/tmp.dMS12eCaQP /tmp/tmp.9NmlocIR4u +++ return 0 ++ echo postgres-operator-bb4cd6646-bpb6v + kill_pod postgres-operator-bb4cd6646-bpb6v + local pod=postgres-operator-bb4cd6646-bpb6v + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/chaos-pod-kill.yml + yq w - metadata.name chaos-cluster-pod-kill-8099 + yq w - 'spec.selector.pods.operator-self-healing-28680[0]' postgres-operator-bb4cd6646-bpb6v + kubectl_bin apply -f - ++ mktemp + yq w - metadata.namespace operator-self-healing-28680 + local LAST_OUT=/tmp/tmp.1OCDBIwFAV ++ mktemp + local LAST_ERR=/tmp/tmp.3vIoYXjXjT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.1OCDBIwFAV podchaos.chaos-mesh.org/chaos-cluster-pod-kill-8099 created + cat /tmp/tmp.3vIoYXjXjT + rm /tmp/tmp.1OCDBIwFAV /tmp/tmp.3vIoYXjXjT + return 0 + sleep 5 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-28680 + sleep 10 + set +o xtrace postgres-operator. 
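Note on the step above: kill_pod composes its chaos object by piping e2e-tests/conf/chaos-pod-kill.yml through three yq w edits (name, namespace, pod selector) before kubectl apply -f -. The template itself is not shown in this log, so apart from those three fields, which are visible in the trace, the sketch below is an assumption based on the chaos-mesh v1alpha1 PodChaos schema. The wait_deployment postgres-operator that follows asserts the self-healing claim: the operator Deployment replaces the killed pod.

kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-cluster-pod-kill-8099          # set via yq w in the trace above
  namespace: operator-self-healing-28680     # set via yq w in the trace above
spec:
  action: pod-kill                           # assumed from the template filename
  mode: one
  selector:
    pods:
      operator-self-healing-28680:
      - postgres-operator-bb4cd6646-bpb6v    # set via yq w in the trace above
EOF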
+ kubectl_bin patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' ++ mktemp + local LAST_OUT=/tmp/tmp.jTn7lTxceS ++ mktemp + local LAST_ERR=/tmp/tmp.HUgsG4sqDs + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.jTn7lTxceS perconapgcluster.pg.percona.com/some-name-22160 patched + cat /tmp/tmp.HUgsG4sqDs + rm /tmp/tmp.jTn7lTxceS /tmp/tmp.HUgsG4sqDs + return 0 + wait_deployments_removal some-name-22160-replica 0 + local name=some-name-22160-replica + local target_size=0 + set +o xtrace ......++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KT8vFSwCfK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6c7rEgflz2 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.KT8vFSwCfK +++ cat /tmp/tmp.6c7rEgflz2 +++ rm /tmp/tmp.KT8vFSwCfK /tmp/tmp.6c7rEgflz2 +++ return 0 ++ echo postgres-operator-bb4cd6646-dp4wr + failure_pod postgres-operator-bb4cd6646-dp4wr + local pod=postgres-operator-bb4cd6646-dp4wr + yq w - metadata.name chaos-cluster-pod-failure-22520 + yq w - metadata.namespace operator-self-healing-28680 + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/chaos-pod-failure.yml + yq w - 'spec.selector.pods.operator-self-healing-28680[0]' postgres-operator-bb4cd6646-dp4wr + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.t96zGnmzF8 ++ mktemp + local LAST_ERR=/tmp/tmp.RxaFA9mUek + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.t96zGnmzF8 podchaos.chaos-mesh.org/chaos-cluster-pod-failure-22520 created + cat /tmp/tmp.RxaFA9mUek + rm /tmp/tmp.t96zGnmzF8 /tmp/tmp.RxaFA9mUek + return 0 + sleep 10 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-28680 + sleep 10 + set +o xtrace postgres-operator.......... 
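Note: each chaos scenario is verified the same way. Inject the fault, wait for the operator deployment to come back, then patch /spec/pgReplicas/hotStandby/size on the perconapgcluster and confirm the operator still reconciles the replica deployments to the requested size. The helpers (wait_deployments_removal and friends) live in the suite's functions file, not in this log; below is a hypothetical condensed form of the check, assuming the replica deployments carry the label name=<cluster>-replica seen in the spec dump further down.

verify_operator_reconciles() {
    local cluster=$1 size=$2
    # Patch the replica count on the custom resource ...
    kubectl patch "perconapgcluster/${cluster}" --type json \
        -p "[{\"op\":\"replace\",\"path\":\"/spec/pgReplicas/hotStandby/size\",\"value\":${size}}]"
    # ... then poll until the operator converges the replica deployments on it.
    until [ "$(kubectl get deploy --selector="name=${cluster}-replica" -o name | wc -l)" -eq "$size" ]; do
        echo -n .
        sleep 1
    done
}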
+ kubectl_bin patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":2}]' ++ mktemp + local LAST_OUT=/tmp/tmp.KYLBCDnTnW ++ mktemp + local LAST_ERR=/tmp/tmp.HwpF78UR2E + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":2}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.KYLBCDnTnW perconapgcluster.pg.percona.com/some-name-22160 patched + cat /tmp/tmp.HwpF78UR2E + rm /tmp/tmp.KYLBCDnTnW /tmp/tmp.HwpF78UR2E + return 0 + wait_deployments_removal some-name-22160-replica 2 + local name=some-name-22160-replica + local target_size=2 + set +o xtrace .++ get_operator_pod +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bhXjEv0uoV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zQMzK8KZji +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.bhXjEv0uoV +++ cat /tmp/tmp.zQMzK8KZji +++ rm /tmp/tmp.bhXjEv0uoV /tmp/tmp.zQMzK8KZji +++ return 0 ++ echo postgres-operator-bb4cd6646-dp4wr + network_loss postgres-operator-bb4cd6646-dp4wr + local pod=postgres-operator-bb4cd6646-dp4wr + yq w - metadata.name chaos-cluster-network-loss-28858 + yq w - metadata.namespace operator-self-healing-28680 + yq w - 'spec.selector.pods.operator-self-healing-28680[0]' postgres-operator-bb4cd6646-dp4wr + cat /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/chaos-network-loss.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rMsEGWHEfQ ++ mktemp + local LAST_ERR=/tmp/tmp.jCUmMa0na0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.rMsEGWHEfQ networkchaos.chaos-mesh.org/chaos-cluster-network-loss-28858 created + cat /tmp/tmp.jCUmMa0na0 + rm /tmp/tmp.rMsEGWHEfQ /tmp/tmp.jCUmMa0na0 + return 0 + wait_deployment postgres-operator + local name=postgres-operator + local target_namespace=operator-self-healing-28680 + sleep 10 + set +o xtrace postgres-operator + kubectl_bin patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' ++ mktemp + local LAST_OUT=/tmp/tmp.eUYUjMTgng ++ mktemp + local LAST_ERR=/tmp/tmp.gq77c0SglH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl patch perconapgcluster/some-name-22160 --type json -p '[{"op":"replace","path":"/spec/pgReplicas/hotStandby/size","value":0}]' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.eUYUjMTgng perconapgcluster.pg.percona.com/some-name-22160 patched + cat /tmp/tmp.gq77c0SglH + rm /tmp/tmp.eUYUjMTgng /tmp/tmp.gq77c0SglH + return 0 + wait_deployments_removal some-name-22160-replica 0 + local name=some-name-22160-replica + local target_size=0 + set +o xtrace .....................................
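Note: network_loss above follows the same yq-templating recipe, this time over e2e-tests/conf/chaos-network-loss.yml. The applied object plausibly looks like the sketch below; only the name, namespace, and pod selector are taken from the trace, while the action, loss percentage, and duration are assumptions based on the chaos-mesh v1alpha1 NetworkChaos schema. The "no route to host" and "http2: client connection lost" errors in the operator log dump further below are consistent with total packet loss during this window.

kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-cluster-network-loss-28858
  namespace: operator-self-healing-28680
spec:
  action: loss
  mode: one
  selector:
    pods:
      operator-self-healing-28680:
      - postgres-operator-bb4cd6646-dp4wr
  loss:
    loss: "100"          # assumed: drop all traffic to/from the operator pod
    correlation: "100"
  duration: "60s"        # assumed; the template's duration is not in the log
EOF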
+ kubectl delete NetworkChaos --all networkchaos.chaos-mesh.org "chaos-cluster-network-loss-28858" deleted + kubectl delete PodChaos --all podchaos.chaos-mesh.org "chaos-cluster-pod-failure-22520" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill-8099" deleted + sleep 5 + destroy_chaos_mesh operator-self-healing-28680 + local chaos_mesh_ns=operator-self-healing-28680 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + helm del chaos-mesh --namespace operator-self-healing-28680 release "chaos-mesh" uninstalled + destroy operator-self-healing-28680 + grep -v level=info + grep -v level=debug + grep -v 'Getting pods from source' + grep -v 'Getting tasks for pod' ++ get_operator_pod + tee /tmp/tmp.KMBwDowh2k/operator.log + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' +++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + sort -u ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rT783D3j3n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jhA390qSrH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.rT783D3j3n +++ cat /tmp/tmp.jhA390qSrH +++ rm /tmp/tmp.rT783D3j3n /tmp/tmp.jhA390qSrH +++ return 0 ++ echo postgres-operator-bb4cd6646-dp4wr + kubectl_bin logs postgres-operator-bb4cd6646-dp4wr -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.0IF5STgEKK ++ mktemp + local LAST_ERR=/tmp/tmp.lwHtqTMXZq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl logs postgres-operator-bb4cd6646-dp4wr -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.0IF5STgEKK + cat /tmp/tmp.lwHtqTMXZq + rm /tmp/tmp.0IF5STgEKK /tmp/tmp.lwHtqTMXZq + return 0
[the sort -u pass interleaves alphabetically ordered fragments of the some-name-22160-repl1/repl2 deployment-spec JSON with the operator's own log lines; the JSON fragments are elided here and only the timestamped log lines are kept]
2024/04/24 19:42:07 INF 1 (localhost:4150) connecting to nsqd
2024/04/24 19:42:08 INF 2 (localhost:4150) connecting to nsqd
2024/04/24 19:42:26 ERR 1 (localhost:4150) IO error - EOF
2024/04/24 19:42:26 ERR 2 (localhost:4150) IO error - EOF
2024/04/24 19:42:26 INF 1 exiting router
2024/04/24 19:42:26 INF 1 (localhost:4150) beginning close
2024/04/24 19:42:26 INF 1 (localhost:4150) breaking out of writeLoop
2024/04/24 19:42:26 INF 1 (localhost:4150) clean close complete
2024/04/24 19:42:26 INF 1 (localhost:4150) finished draining, cleanup exiting
2024/04/24 19:42:26 INF 1 (localhost:4150) readLoop exiting
2024/04/24 19:42:26 INF 1 (localhost:4150) writeLoop exiting
2024/04/24 19:42:26 INF 2 exiting router
2024/04/24 19:42:26 INF 2 (localhost:4150) beginning close
2024/04/24 19:42:26 INF 2 (localhost:4150) breaking out of writeLoop
2024/04/24 19:42:26 INF 2 (localhost:4150) clean close complete
2024/04/24 19:42:26 INF 2 (localhost:4150) finished draining, cleanup exiting
2024/04/24 19:42:26 INF 2 (localhost:4150) readLoop exiting
2024/04/24 19:42:26 INF 2 (localhost:4150) writeLoop exiting
2024/04/24 19:43:09 INF 3 (localhost:4150) connecting to nsqd
2024/04/24 19:43:10 INF 4 (localhost:4150) connecting to nsqd
2024/04/24 19:43:15 INF 5 (localhost:4150) connecting to nsqd
E0424 19:42:58.428793 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.Pgcluster: failed to list *v1.Pgcluster: Get "https://10.234.208.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-28680/pgclusters?resourceVersion=104566": dial tcp 10.234.208.1:443: connect: no route to host
E0424 19:42:58.428935 1 reflector.go:138] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://10.234.208.1:443/api/v1/namespaces/operator-self-healing-28680/configmaps?resourceVersion=104786": dial tcp 10.234.208.1:443: connect: no route to host
I0424 19:41:55.722067 1 shared_informer.go:240] Waiting for caches to sync for namespace
I0424 19:41:55.822769 1 shared_informer.go:247] Caches are synced for namespace
I0424 19:41:55.936147 1 shared_informer.go:240] Waiting for caches to sync for operator-self-healing-28680
I0424 19:41:56.036419 1 shared_informer.go:247] Caches are synced for operator-self-healing-28680
I0424 19:43:09.393450 1 trace.go:205] Trace[661108632]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.751) (total time: 10641ms):
I0424 19:43:09.393782 1 trace.go:205] Trace[1268774027]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.649) (total time: 10743ms):
I0424 19:43:09.394732 1 trace.go:205] Trace[1374624313]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.597) (total time: 10797ms):
I0424 19:43:09.394965 1 trace.go:205] Trace[1705457145]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.732) (total time: 10662ms):
I0424 19:43:09.395374 1 trace.go:205] Trace[228370320]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.659) (total time: 10735ms):
I0424 19:43:09.402253 1 trace.go:205] Trace[1537940053]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167 (24-Apr-2024 19:42:58.528) (total time: 10873ms):
time="2024-04-24T19:41:55Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0
time="2024-04-24T19:41:57Z" level=error msg="create pgcluster resource: create pgcluster resource: pgclusters.pg.percona.com \"some-name-22160\" already exists" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onAdd()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:174" version=1.6.0
time="2024-04-24T19:42:07Z" level=warning msg="crv1 pgreplica some-name-22160-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0
time="2024-04-24T19:42:08Z" level=warning msg="crv1 pgreplica some-name-22160-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0
time="2024-04-24T19:42:57Z" level=error msg="Get \"https://10.234.208.1:443/apis/apps/v1/namespaces/operator-self-healing-28680/deployments?labelSelector=pg-cluster%3Dsome-name-22160%2Cpgo-pg-database\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/configmap.(*Controller).createPGHAConfigs()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/configmap/synchandler.go:90" version=1.6.0
time="2024-04-24T19:42:57Z" level=error msg="Get \"https://10.234.208.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-28680/pgclusters/some-name-22160\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0
time="2024-04-24T19:42:57Z" level=error msg="operator is unable to reconcile RBAC resource: Get \"https://10.234.208.1:443/api/v1/namespaces/operator-self-healing-28680/serviceaccounts/postgres-operator\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/internal/controller/manager.(*ControllerManager).reconcileRBAC()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/manager/rbac.go:51" version=1.6.0
time="2024-04-24T19:42:57Z" level=error msg="reconcile perocnapgclusters: get operator ns list: get namespaceOperatingMode: Post \"https://10.234.208.1:443/apis/authorization.k8s.io/v1/selfsubjectaccessreviews\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).reconcilePerconaPG()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:310" version=1.6.0
time="2024-04-24T19:42:57Z" level=error msg="update perconapgcluster: update pgcluster: update pgPrimary deployment: wait deployment: get deployment: Get \"https://10.234.208.1:443/apis/apps/v1/namespaces/operator-self-healing-28680/deployments/some-name-22160\": http2: client connection lost" func="github.com/percona/percona-postgresql-operator/percona/controllers/pgc.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/percona/controllers/pgc/pgc.go:542" version=1.6.0
time="2024-04-24T19:42:58Z" level=error msg="Get \"https://10.234.208.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-28680/pgclusters/some-name-22160\": dial tcp 10.234.208.1:443: connect: no route to host" func="github.com/percona/percona-postgresql-operator/internal/controller/pod.(*Controller).onUpdate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/pod/podcontroller.go:103" version=1.6.0
time="2024-04-24T19:42:58Z" level=error msg="Get \"https://10.234.208.1:443/api/v1/namespaces/operator-self-healing-28680/configmaps/some-name-22160-config\": dial tcp 10.234.208.1:443: connect: no route to host" func="github.com/percona/percona-postgresql-operator/internal/controller/configmap.(*Controller).syncPGHAConfig.func1()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/configmap/synchandler.go:108" version=1.6.0
time="2024-04-24T19:42:58Z" level=error msg="operator is unable to reconcile RBAC resource: Get \"https://10.234.208.1:443/api/v1/namespaces/operator-self-healing-28680/serviceaccounts/pgo-default\": dial tcp 10.234.208.1:443: connect: no route to host" func="github.com/percona/percona-postgresql-operator/internal/controller/manager.(*ControllerManager).reconcileServiceAccounts()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/controller/manager/rbac.go:132" version=1.6.0
time="2024-04-24T19:43:09Z" level=warning msg="crv1 pgreplica some-name-22160-repl1 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0
time="2024-04-24T19:43:09Z" level=warning msg="crv1 pgreplica some-name-22160-repl2 is already marked complete, will not recreate" func="github.com/percona/percona-postgresql-operator/internal/operator/cluster.ScaleBase()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/cluster/cluster.go:535" version=1.6.0
Trace[1268774027]: [10.743991787s] [10.743991787s] END
Trace[1268774027]: ---"Objects listed" error: 10743ms (19:43:09.393)
Trace[1374624313]: [10.797104491s] [10.797104491s] END
Trace[1374624313]: ---"Objects listed" error: 10797ms (19:43:09.394)
Trace[1537940053]: [10.87396854s] [10.87396854s] END
Trace[1537940053]: ---"Objects listed" error: 10873ms (19:43:09.402)
Trace[1705457145]: [10.662580265s] [10.662580265s] END
Trace[1705457145]: ---"Objects listed" error: 10662ms (19:43:09.394)
Trace[228370320]: [10.735358008s] [10.735358008s] END
Trace[228370320]: ---"Objects listed" error: 10735ms (19:43:09.395)
Trace[661108632]: [10.641754362s] [10.641754362s] END
Trace[661108632]: ---"Objects listed" error: 10641ms (19:43:09.393)
W0424 19:42:57.241865 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Job ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.241873 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.PerconaPGCluster ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.241924 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgreplica ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.241957 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.241990 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgtask ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.242021 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgcluster ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.242041 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pgpolicy ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:57.242172 1 reflector.go:442] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: watch of *v1.Pod ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
W0424 19:42:58.428750 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.Pgcluster: Get "https://10.234.208.1:443/apis/pg.percona.com/v1/namespaces/operator-self-healing-28680/pgclusters?resourceVersion=104566": dial tcp 10.234.208.1:443: connect: no route to host
W0424 19:42:58.428895 1 reflector.go:324] pkg/mod/k8s.io/client-go@v0.23.0/tools/cache/reflector.go:167: failed to list *v1.ConfigMap: Get "https://10.234.208.1:443/api/v1/namespaces/operator-self-healing-28680/configmaps?resourceVersion=104786": dial tcp 10.234.208.1:443: connect: no route to host
+ destroy_operator + kubectl_bin delete -f /tmp/tmp.KMBwDowh2k/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.U2IL8ODkNl ++ mktemp + local LAST_ERR=/tmp/tmp.gQFovHD2Ed + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.KMBwDowh2k/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.KMBwDowh2k/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.KMBwDowh2k/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.U2IL8ODkNl + cat /tmp/tmp.gQFovHD2Ed Error from server (NotFound): error when deleting "/tmp/tmp.KMBwDowh2k/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.KMBwDowh2k/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.KMBwDowh2k/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.KMBwDowh2k/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.KMBwDowh2k/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.U2IL8ODkNl /tmp/tmp.gQFovHD2Ed + return 1 + true + yq w -d4 /tmp/tmp.KMBwDowh2k/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.KMBwDowh2k/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.giO95O3EWZ ++ mktemp + local LAST_ERR=/tmp/tmp.kVzAZoueAO + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.KMBwDowh2k/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.giO95O3EWZ serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.kVzAZoueAO + rm /tmp/tmp.giO95O3EWZ /tmp/tmp.kVzAZoueAO
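Note on the teardown above: the direct kubectl delete -f operator.yaml fails with NotFound because the deployer's install-time objects are already gone, so the test instead rewrites the deployer manifest and re-runs it in uninstall mode. The -d4 flag targets document index 4 of the multi-document manifest, which the trace implies is the pgo-deploy Job, and the Job's first env var selects install vs. uninstall. The redirect into operator-destroy.yaml is not visible in xtrace output, so it is inferred:

yq w -d4 /tmp/tmp.KMBwDowh2k/operator.yaml \
    'spec.template.spec.containers[0].env[0].value' uninstall \
    > /tmp/tmp.KMBwDowh2k/operator-destroy.yaml
kubectl apply -f /tmp/tmp.KMBwDowh2k/operator-destroy.yaml    # job.batch/pgo-deploy, now in uninstall mode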
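Note: the long run of polling iterations that follows is wait_job_completion waiting for the pgo-deploy job. Read off the trace, it checks once a second that the job object still exists (bailing out on "not found") and that .status.succeeded is 1, giving up after 600 tries. A hypothetical reconstruction, since the real helper is defined in the suite's functions file rather than in this log:

wait_job_completion() {
    local job=$1 ns=$2 retry=0
    while true; do
        if kubectl -n "$ns" get "job/$job" -o 'jsonpath={.metadata.name}' 2>&1 | grep -qio 'not found'; then
            return 1    # the job object disappeared
        fi
        if [ "$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}')" = "1" ]; then
            return 0    # the job reported a succeeded pod
        fi
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [ "$retry" -ge 600 ]; then
            return 1    # give up after ~10 minutes
        fi
    done
}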
+ return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=operator-self-healing-28680 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n .
.+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n operator-self-healing-28680 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.KMBwDowh2k/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.QkFXoZ8j8q ++ mktemp + local LAST_ERR=/tmp/tmp.0sBWsP5OmV + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.KMBwDowh2k/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.QkFXoZ8j8q serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.0sBWsP5OmV + rm /tmp/tmp.QkFXoZ8j8q /tmp/tmp.0sBWsP5OmV + return 0 + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + kubectl_bin get ns + xargs kubectl delete ns --grace-period=0 --force=true ++ mktemp + local LAST_OUT=/tmp/tmp.yXA28sBJWB ++ mktemp + local LAST_ERR=/tmp/tmp.jF9JD0hYCH + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yXA28sBJWB + cat /tmp/tmp.jF9JD0hYCH + rm /tmp/tmp.yXA28sBJWB /tmp/tmp.jF9JD0hYCH + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
namespace "operator-self-healing-28680" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.VtPbsLZ1BE ++ mktemp + local LAST_ERR=/tmp/tmp.VB1fjgusYz + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.VtPbsLZ1BE + cat /tmp/tmp.VB1fjgusYz Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.VtPbsLZ1BE /tmp/tmp.VB1fjgusYz + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.aA6BtL3pwm ++ mktemp + local LAST_ERR=/tmp/tmp.D8uQk8jWv8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.aA6BtL3pwm + cat /tmp/tmp.D8uQk8jWv8 Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.aA6BtL3pwm /tmp/tmp.D8uQk8jWv8 + return 1 + true + rm -rf /tmp/tmp.KMBwDowh2k