++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/monitoring.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/logs/monitoring.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.15.1 ++ '[' v3 == v2 ']' + create_namespace monitoring-7567 + local namespace=monitoring-7567 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + cleanup_rbac + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.oJGFEED4Da ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.zQVX3mmLCl ++ mktemp + local LAST_ERR=/tmp/tmp.vRtZdxzTTv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + local LAST_ERR=/tmp/tmp.MbBDQdxyU7 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl get ns + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oJGFEED4Da + cat /tmp/tmp.vRtZdxzTTv + rm /tmp/tmp.oJGFEED4Da /tmp/tmp.vRtZdxzTTv + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.zQVX3mmLCl + cat /tmp/tmp.MbBDQdxyU7 Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.zQVX3mmLCl /tmp/tmp.MbBDQdxyU7 + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.Mv4LSlL2cL ++ mktemp + local LAST_ERR=/tmp/tmp.GsDUx1yqH4 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 
2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.Mv4LSlL2cL + cat /tmp/tmp.GsDUx1yqH4 Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.Mv4LSlL2cL /tmp/tmp.GsDUx1yqH4 + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace monitoring-7567 ++ mktemp + local LAST_OUT=/tmp/tmp.YLi4JVVrtL ++ mktemp + local LAST_ERR=/tmp/tmp.cuqyKVjXEB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace monitoring-7567 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace monitoring-7567 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete namespace monitoring-7567 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.YLi4JVVrtL + cat /tmp/tmp.cuqyKVjXEB Error from server (NotFound): namespaces "monitoring-7567" not found + rm /tmp/tmp.YLi4JVVrtL /tmp/tmp.cuqyKVjXEB + return 1 + : + wait_for_delete namespace/monitoring-7567 + local res=namespace/monitoring-7567 + set +o xtrace namespace/monitoring-7567 - Error from server (NotFound): namespaces "monitoring-7567" not found + kubectl_bin create namespace monitoring-7567 ++ mktemp + local LAST_OUT=/tmp/tmp.Z2unoFFSm9 ++ mktemp + local LAST_ERR=/tmp/tmp.3J0LenMXUx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl create namespace monitoring-7567 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Z2unoFFSm9 namespace/monitoring-7567 created + cat /tmp/tmp.3J0LenMXUx + rm /tmp/tmp.Z2unoFFSm9 
/tmp/tmp.3J0LenMXUx + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.e1nM6G3pfr +++ mktemp ++ local LAST_ERR=/tmp/tmp.xmCUsQVu8J ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.e1nM6G3pfr ++ cat /tmp/tmp.xmCUsQVu8J ++ rm /tmp/tmp.e1nM6G3pfr /tmp/tmp.xmCUsQVu8J ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox --namespace=monitoring-7567 ++ mktemp + local LAST_OUT=/tmp/tmp.BtJwh66G2Y ++ mktemp + local LAST_ERR=/tmp/tmp.FlTR7CpTEv + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox --namespace=monitoring-7567 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BtJwh66G2Y Context "gke_cloud-dev-112233_us-central1-c_jen-pg-776-0087ef129-2-sandbox" modified. + cat /tmp/tmp.FlTR7CpTEv + rm /tmp/tmp.BtJwh66G2Y /tmp/tmp.FlTR7CpTEv + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=monitoring-7567 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml disabled install monitoring-7567 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=monitoring-7567 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "monitoring-7567"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' + /usr/bin/sed -e 's#^namespace: .*#namespace: "monitoring-7567"#g' + cat /dev/fd/63 - ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-776-0087ef129 ++ cut -d: -f2 + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-776-0087ef129"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/operator.yaml metadata.namespace monitoring-7567 + yq w -d3 - 'subjects[0].namespace' monitoring-7567 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-776-0087ef129-pgo-deployer ++ cat /tmp/tmp.9Jeg1znX05/operator.ini + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" ccp_image_pull_secret: "" 
ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "monitoring-7567" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-776-0087ef129" pgo_operator_namespace: "monitoring-7567"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.9Jeg1znX05/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hdj7eDIGpq ++ mktemp + local LAST_ERR=/tmp/tmp.Dntt74aWak + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.9Jeg1znX05/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.hdj7eDIGpq serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.Dntt74aWak + rm /tmp/tmp.hdj7eDIGpq /tmp/tmp.Dntt74aWak + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-7567 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
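-----------------------------------------------------------------------------------
The dotted trace above is the harness's wait_job_completion loop: it polls
job/pgo-deploy once per second, giving up after 600 tries, until the Job
reports .status.succeeded == 1. A minimal standalone sketch of that pattern,
assuming plain kubectl and a hypothetical helper name (not the harness source):

# wait_for_job: hypothetical reconstruction of the polling pattern traced above.
wait_for_job() {
    local job=$1 ns=$2 retry=0
    until [[ "$(kubectl -n "$ns" get "job/$job" -o 'jsonpath={.status.succeeded}' 2>/dev/null)" == "1" ]]; do
        sleep 1
        echo -n .
        (( ++retry >= 600 )) && { echo "job/$job never completed" >&2; return 1; }
    done
    echo
}
# usage: wait_for_job pgo-deploy monitoring-7567
-----------------------------------------------------------------------------------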
[.. 28 near-identical wait iterations elided: retry counter 7..34, one '.' printed per second while job/pgo-deploy reports no .status.succeeded ..]
.+ let retry+=1 + '[' 35 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.9Jeg1znX05/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YepT595woY ++ mktemp + local LAST_ERR=/tmp/tmp.RYW4f1k7so + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.9Jeg1znX05/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YepT595woY serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.RYW4f1k7so + rm /tmp/tmp.YepT595woY /tmp/tmp.RYW4f1k7so + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vj5QNGRfGQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.q6ZcUeoZNB ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Vj5QNGRfGQ ++ cat /tmp/tmp.q6ZcUeoZNB ++ rm /tmp/tmp.Vj5QNGRfGQ /tmp/tmp.q6ZcUeoZNB ++ return 0 + wait_pod postgres-operator-85ccdd58dc-92prj + local pod=postgres-operator-85ccdd58dc-92prj + local ns= + set +o xtrace postgres-operator-85ccdd58dc-92prjtrue + [[ -n '' ]] + deploy_helm monitoring-7567 + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + helm repo remove percona "percona" has been removed from your repositories + helm repo add percona https://percona-charts.storage.googleapis.com/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. 
⎈Happy Helming!⎈ + desc 'create first PGO cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PGO cluster ----------------------------------------------------------------------------------- + cluster=some-name + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + platform=kubernetes + helm del --purge monitoring Error: unknown flag: --purge + : + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server --set platform=kubernetes https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Fri May 24 20:36:55 2024 NAMESPACE: monitoring-7567 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-7567.svc.cluster.local:443 login: admin password: admin + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.fpFa2BZwqo ++ mktemp + local LAST_ERR=/tmp/tmp.cxUax3vZ0f + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.fpFa2BZwqo + cat /tmp/tmp.cxUax3vZ0f error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.fpFa2BZwqo /tmp/tmp.cxUax3vZ0f + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.cpA3fmHizW ++ mktemp + local LAST_ERR=/tmp/tmp.4jZZBxg4wn + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.cpA3fmHizW + cat /tmp/tmp.4jZZBxg4wn + rm /tmp/tmp.cpA3fmHizW /tmp/tmp.4jZZBxg4wn + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jG8qtQVOBu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vu2JpieEEX ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.jG8qtQVOBu ++ cat /tmp/tmp.Vu2JpieEEX ++ rm /tmp/tmp.jG8qtQVOBu /tmp/tmp.Vu2JpieEEX ++ return 0 + ADMIN_PASSWORD=admin + 
kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.dfK9LczVeD ++ mktemp + local LAST_ERR=/tmp/tmp.PRlnS5VCzZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.dfK9LczVeD logger=settings t=2024-05-24T20:37:21.092535162Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2024-05-24T20:37:21.092711018Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2024-05-24T20:37:21.092725303Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2024-05-24T20:37:21.092733828Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2024-05-24T20:37:21.092740898Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2024-05-24T20:37:21.092747872Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2024-05-24T20:37:21.092754215Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2024-05-24T20:37:21.092760629Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2024-05-24T20:37:21.092766817Z level=info msg="App mode production" logger=sqlstore t=2024-05-24T20:37:21.092870807Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2024-05-24T20:37:21.111268673Z level=info msg="Starting DB migrations" logger=migrator t=2024-05-24T20:37:21.115321921Z level=info msg="migrations completed" performed=0 skipped=452 duration=421.196µs logger=secrets t=2024-05-24T20:37:21.116882458Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2024-05-24T20:37:21.154418194Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2024-05-24T20:37:21.389189644Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2024-05-24T20:37:21.389241872Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2024-05-24T20:37:21.403989391Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2024-05-24T20:37:21.404305535Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2024-05-24T20:37:21.404378474Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2024-05-24T20:37:21.404430511Z level=warn msg="Permitting unsigned plugin. 
This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.loader t=2024-05-24T20:37:21.404522394Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2024-05-24T20:37:21.404535985Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2024-05-24T20:37:21.404544711Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2024-05-24T20:37:21.404553515Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2024-05-24T20:37:21.404562685Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2024-05-24T20:37:21.404571478Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2024-05-24T20:37:21.404580197Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2024-05-24T20:37:21.40458882Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2024-05-24T20:37:21.419748833Z level=warn msg="Plugin process is running with elevated privileges. This is not recommended" logger=plugin.loader t=2024-05-24T20:37:21.419784808Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2024-05-24T20:37:21.419798171Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2024-05-24T20:37:21.419805478Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2024-05-24T20:37:21.419812548Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel Admin password changed successfully ✔ + cat /tmp/tmp.PRlnS5VCzZ + rm /tmp/tmp.dfK9LczVeD /tmp/tmp.PRlnS5VCzZ + return 0 + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/conf/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/conf/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + local namespace=monitoring-7567 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name monitoring-7567 + local cluster=some-name + local namespace=monitoring-7567 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + kubectl_bin -n monitoring-7567 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9xQqyEkAcI ++ mktemp + local LAST_ERR=/tmp/tmp.tQS69iVVXl + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9xQqyEkAcI secret/some-name-users-secret created + cat /tmp/tmp.tQS69iVVXl + rm /tmp/tmp.9xQqyEkAcI /tmp/tmp.tQS69iVVXl + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-776/deploy/users-secret.yaml 
+ yq w - metadata.name some-name-pmm-secret + kubectl_bin -n monitoring-7567 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XVwGusxOQt ++ mktemp + local LAST_ERR=/tmp/tmp.hCTkfo0IVw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.XVwGusxOQt secret/some-name-pmm-secret created + cat /tmp/tmp.hCTkfo0IVw + rm /tmp/tmp.XVwGusxOQt /tmp/tmp.hCTkfo0IVw + return 0 + kubectl_bin -n monitoring-7567 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.so6XOKxJHP ++ mktemp + local LAST_ERR=/tmp/tmp.BgXodk4ikq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.so6XOKxJHP deployment.apps/pg-client created + cat /tmp/tmp.BgXodk4ikq + rm /tmp/tmp.so6XOKxJHP /tmp/tmp.BgXodk4ikq + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/conf/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/conf/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/conf/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.labels.deployment-name some-name + yq w - spec.keepData false + yq w - metadata.labels.pg-cluster some-name + yq w - metadata.name some-name + yq w - spec.name some-name + yq w - spec.database some-name + yq w - spec.clustername some-name + yq w - spec.user some-name + yq w - spec.secretsName some-name-users-secret + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.keepBackups false + yq w - spec.pmm.image perconalab/pmm-client:2.41.2 + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - metadata.labels.name some-name + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.9Jeg1znX05/cr.yaml -n monitoring-7567 ++ mktemp + local LAST_OUT=/tmp/tmp.WysdkOttyC ++ mktemp + local LAST_ERR=/tmp/tmp.mVCuUyTGXX + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.9Jeg1znX05/cr.yaml -n monitoring-7567 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.WysdkOttyC perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.mVCuUyTGXX + rm /tmp/tmp.WysdkOttyC 
/tmp/tmp.mVCuUyTGXX + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=monitoring-7567 + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=monitoring-7567 + sleep 10 + set +o xtrace some-name.......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=monitoring-7567 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=monitoring-7567 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
[.. 10 near-identical wait iterations elided: retry counter 3..12, polling job/backrest-backup-some-name once per second for .status.succeeded ..]
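-----------------------------------------------------------------------------------
Most of this log's volume comes from the harness's kubectl_bin wrapper rather
than from kubectl itself: each call captures stdout/stderr into two mktemp
files and retries up to three times with a growing back-off (sleep 0, 4, 8),
which is the repeated LAST_OUT/LAST_ERR/seq 0 2 trace. A reconstruction
assumed from that trace, not taken from the harness source:

# kubectl_bin: assumed reconstruction. Three attempts, 0s/4s/8s back-off,
# output captured to tempfiles and replayed after the loop.
kubectl_bin() {
    local out err i
    out=$(mktemp) err=$(mktemp)
    for i in $(seq 0 2); do
        if kubectl "$@" >"$out" 2>"$err"; then
            cat "$out"; cat "$err" >&2
            rm -f "$out" "$err"
            return 0
        fi
        sleep $((i * 4))    # 0, then 4, then 8 seconds between attempts
    done
    cat "$out"; cat "$err" >&2
    rm -f "$out" "$err"
    return 1
}
-----------------------------------------------------------------------------------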
.+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=monitoring-7567 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ed59Gu3ueW +++ mktemp ++ local LAST_ERR=/tmp/tmp.8wdH5a2uWR ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Ed59Gu3ueW ++ cat /tmp/tmp.8wdH5a2uWR ++ rm /tmp/tmp.Ed59Gu3ueW /tmp/tmp.8wdH5a2uWR ++ return 0 + status_query='kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gc0uHxJzWo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iWc2S1L9Aq +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.gc0uHxJzWo +++ cat /tmp/tmp.iWc2S1L9Aq +++ rm /tmp/tmp.gc0uHxJzWo /tmp/tmp.iWc2S1L9Aq +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qkvaPLIRZP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9olEilBZGZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get 
perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qkvaPLIRZP +++ cat /tmp/tmp.9olEilBZGZ +++ rm /tmp/tmp.qkvaPLIRZP /tmp/tmp.9olEilBZGZ +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-7567 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rJc0S0mogX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3jqJTQJZR6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.rJc0S0mogX +++ cat /tmp/tmp.3jqJTQJZR6 +++ rm /tmp/tmp.rJc0S0mogX /tmp/tmp.3jqJTQJZR6 +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mRg73zdm98 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bDnZOEYvxA +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.mRg73zdm98 +++ cat /tmp/tmp.bDnZOEYvxA +++ rm /tmp/tmp.mRg73zdm98 /tmp/tmp.bDnZOEYvxA +++ return 0 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jykwTgL0ft ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Bkr7ZPAVMD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jykwTgL0ft +++ cat /tmp/tmp.Bkr7ZPAVMD +++ rm /tmp/tmp.jykwTgL0ft /tmp/tmp.Bkr7ZPAVMD +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EKGU9hl77I +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.j6iVKH45V3 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.EKGU9hl77I ++++ cat /tmp/tmp.j6iVKH45V3 ++++ rm /tmp/tmp.EKGU9hl77I /tmp/tmp.j6iVKH45V3 ++++ return 0 +++ kubectl_bin -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DxOBEO3sxM ++++ mktemp +++ 
local LAST_ERR=/tmp/tmp.PFJXXPUf1I +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.DxOBEO3sxM +++ cat /tmp/tmp.PFJXXPUf1I +++ rm /tmp/tmp.DxOBEO3sxM /tmp/tmp.PFJXXPUf1I +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.monitoring-7567 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.monitoring-7567 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WNaIgUAIYK +++ mktemp ++ local LAST_ERR=/tmp/tmp.nmrp4VKJh6 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.WNaIgUAIYK ++ cat /tmp/tmp.nmrp4VKJh6 ++ rm /tmp/tmp.WNaIgUAIYK /tmp/tmp.nmrp4VKJh6 ++ return 0 + local client_container=pg-client-76df9df876-kxqks + kubectl_bin exec pg-client-76df9df876-kxqks -n monitoring-7567 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.g2ItKeeS1Z ++ mktemp + local LAST_ERR=/tmp/tmp.dDOaeJZEOV + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-kxqks -n monitoring-7567 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.g2ItKeeS1Z + cat /tmp/tmp.dDOaeJZEOV + rm /tmp/tmp.g2ItKeeS1Z /tmp/tmp.dDOaeJZEOV + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-7567 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8MwemLoxC5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KjUlURAE0c +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.8MwemLoxC5 +++ cat /tmp/tmp.KjUlURAE0c +++ rm /tmp/tmp.8MwemLoxC5 /tmp/tmp.KjUlURAE0c +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.PQLtIzxUwA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cFufZlZlT4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.PQLtIzxUwA +++ cat /tmp/tmp.cFufZlZlT4 +++ rm /tmp/tmp.PQLtIzxUwA /tmp/tmp.cFufZlZlT4 +++ return 0 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JzK1l4x7OA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GmwfMIRsNE +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.JzK1l4x7OA +++ cat /tmp/tmp.GmwfMIRsNE +++ rm /tmp/tmp.JzK1l4x7OA /tmp/tmp.GmwfMIRsNE +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Kj6de6aCoj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.aD550gHCQ0 ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.Kj6de6aCoj ++++ cat /tmp/tmp.aD550gHCQ0 ++++ rm /tmp/tmp.Kj6de6aCoj /tmp/tmp.aD550gHCQ0 ++++ return 0 +++ kubectl_bin -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fQqKG8Zoo4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QkQPyyQfQL +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fQqKG8Zoo4 +++ cat /tmp/tmp.QkQPyyQfQL +++ rm /tmp/tmp.fQqKG8Zoo4 /tmp/tmp.QkQPyyQfQL +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.monitoring-7567 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.monitoring-7567 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NmdEyBcXIr +++ mktemp ++ local LAST_ERR=/tmp/tmp.ATIeFMDOBL ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.NmdEyBcXIr ++ cat /tmp/tmp.ATIeFMDOBL ++ rm /tmp/tmp.NmdEyBcXIr /tmp/tmp.ATIeFMDOBL ++ return 0 + local client_container=pg-client-76df9df876-kxqks + kubectl_bin exec pg-client-76df9df876-kxqks -n monitoring-7567 -- 
bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.ax4WYyJ97S ++ mktemp + local LAST_ERR=/tmp/tmp.bbSqqHi9RU + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-kxqks -n monitoring-7567 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.ax4WYyJ97S + cat /tmp/tmp.bbSqqHi9RU + rm /tmp/tmp.ax4WYyJ97S /tmp/tmp.bbSqqHi9RU + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-7567 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.77wJYtZuw5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DRGsutJrqb +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.77wJYtZuw5 +++ cat /tmp/tmp.DRGsutJrqb +++ rm /tmp/tmp.77wJYtZuw5 /tmp/tmp.DRGsutJrqb +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tteDJ5DmZB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xmNW9QrPnu +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.tteDJ5DmZB +++ cat /tmp/tmp.xmNW9QrPnu +++ rm /tmp/tmp.tteDJ5DmZB /tmp/tmp.xmNW9QrPnu +++ return 0 +++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MbGIwxJu44 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2ZnE2cPHtD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.MbGIwxJu44 +++ cat /tmp/tmp.2ZnE2cPHtD +++ rm /tmp/tmp.MbGIwxJu44 /tmp/tmp.2ZnE2cPHtD +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nFaYdkgCDx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lKa8WVL39F ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++++ kubectl -n monitoring-7567 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set 
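Note why the statement above is prefixed with "\c myapp": the connection URI carries no database name, so psql defaults to a database named after the user (postgres), and the meta-command reconnects to the test database before the INSERT runs. A direct equivalent, reusing $pass from the lookup sketch above:

printf '%s\n' '\c myapp' 'INSERT INTO myApp (id) VALUES (100500);' \
  | psql -v ON_ERROR_STOP=1 -t -q \
      "postgres://postgres:${pass}@some-name.monitoring-7567.svc.cluster.local"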
-o errexit ++++ cat /tmp/tmp.nFaYdkgCDx ++++ cat /tmp/tmp.lKa8WVL39F ++++ rm /tmp/tmp.nFaYdkgCDx /tmp/tmp.lKa8WVL39F ++++ return 0 +++ kubectl_bin -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9Lr9c9nzcX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G2R8x53EJO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl -n monitoring-7567 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9Lr9c9nzcX +++ cat /tmp/tmp.G2R8x53EJO +++ rm /tmp/tmp.9Lr9c9nzcX /tmp/tmp.G2R8x53EJO +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.monitoring-7567 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.monitoring-7567 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mgn3LqNuXm +++ mktemp ++ local LAST_ERR=/tmp/tmp.YUmyuQgLEF ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl -n monitoring-7567 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.Mgn3LqNuXm ++ cat /tmp/tmp.YUmyuQgLEF ++ rm /tmp/tmp.Mgn3LqNuXm /tmp/tmp.YUmyuQgLEF ++ return 0 + local client_container=pg-client-76df9df876-kxqks + kubectl_bin exec pg-client-76df9df876-kxqks -n monitoring-7567 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.B2ac4U14V0 ++ mktemp + local LAST_ERR=/tmp/tmp.8zSm6bnTKx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl exec pg-client-76df9df876-kxqks -n monitoring-7567 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-7567.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.B2ac4U14V0 + cat /tmp/tmp.8zSm6bnTKx + rm /tmp/tmp.B2ac4U14V0 /tmp/tmp.8zSm6bnTKx + return 0 + sleep 10 + desc 'compare deployment' + set +o xtrace ----------------------------------------------------------------------------------- compare deployment ----------------------------------------------------------------------------------- + compare_kubectl deployment/some-name + local resource=deployment/some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/compare/deployment_some-name.yml + local new_result=/tmp/tmp.9Jeg1znX05/deployment_some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name + yq d - '**.creationTimestamp' + yq d - 
'**.namespace' + yq d - metadata.managedFields + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' ++ mktemp + yq d - metadata.selfLink + yq d - '**.clusterIP' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.finalizers' + yq d - '**.clusterIPs' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.dataSource' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.image' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."percona.com/*"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.volumeName' + local LAST_OUT=/tmp/tmp.iKoyCAZ7FW + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.enableServiceLinks' + yq d - spec.volumeMode + yq d - '**.nodePort' + yq d - status + yq d - '**.imagePullSecrets' + yq d - spec.manualSelector + yq d - '**.(name==suffix)' + yq d - '**.ipFamilyPolicy' + yq d - '**.(name==NAMESPACE)' + yq d - spec.podReplacementPolicy + yq d - '**.(name==COMMAND_OPTS)' + yq d - '**.(name==PODNAME)' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.nodeName + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.healthCheckNodePort' + yq d - '**.internalTrafficPolicy' + yq d - '**.preemptionPolicy' + yq d - '**.ipFamilies' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.allocateLoadBalancerNodePorts' ++ mktemp + yq d - '**.controller-uid' + local LAST_ERR=/tmp/tmp.io5Lr2sTDn + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.creationTimestamp' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl get -o yaml deployment/some-name + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.iKoyCAZ7FW + cat /tmp/tmp.io5Lr2sTDn + rm /tmp/tmp.iKoyCAZ7FW /tmp/tmp.io5Lr2sTDn + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.9Jeg1znX05/deployment_some-name.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-776/e2e-tests/monitoring/compare/deployment_some-name.yml /tmp/tmp.9Jeg1znX05/deployment_some-name.yml + desc 'check metrics' + set +o xtrace ----------------------------------------------------------------------------------- check metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds some-name admin:admin + local metric=node_boot_time_seconds + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1716583160 ++ /usr/bin/date -u +%s + local end=1716583220 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.U2MmOC6wEw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XcYVzdRkXP +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.U2MmOC6wEw +++ 
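compare_kubectl above normalizes the live object before diffing it against the stored expectation: every volatile or cluster-assigned field is stripped with yq (v3 "yq d" syntax, as everywhere in this suite), including image tags, so only stable spec fields are compared. A trimmed sketch keeping a few of the many filters; tmp_dir stands in for this run's /tmp/tmp.9Jeg1znX05:

kubectl get -o yaml deployment/some-name \
  | yq d - '**.creationTimestamp' \
  | yq d - '**.uid' \
  | yq d - metadata.resourceVersion \
  | yq d - '**.image' \
  | yq d - status \
  > "$tmp_dir/deployment_some-name.yml"
diff -u e2e-tests/monitoring/compare/deployment_some-name.yml \
    "$tmp_dir/deployment_some-name.yml"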
cat /tmp/tmp.XcYVzdRkXP +++ rm /tmp/tmp.U2MmOC6wEw /tmp/tmp.XcYVzdRkXP +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qZBVMUH6LN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.B1jRvrE9ep +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qZBVMUH6LN +++ cat /tmp/tmp.B1jRvrE9ep +++ rm /tmp/tmp.qZBVMUH6LN /tmp/tmp.B1jRvrE9ep +++ return 0 ++ local ip=104.154.153.113 ++ '[' -n 104.154.153.113 -a 104.154.153.113 '!=' null ']' ++ echo 104.154.153.113 ++ return + local endpoint=104.154.153.113 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@104.154.153.113/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7D%29&start=1716583160&end=1716583220&step=60' + grep '^"[0-9]' "1716571962" "1716571962" + desc 'check qan20 metrics' + set +o xtrace ----------------------------------------------------------------------------------- check qan20 metrics ----------------------------------------------------------------------------------- + get_qan20_values some-name admin:admin + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2024-05-24T20:10:22 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2024-05-24T20:40:22 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o7pnlpUdGc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rZLYBqTZcO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.o7pnlpUdGc +++ cat /tmp/tmp.rZLYBqTZcO +++ rm /tmp/tmp.o7pnlpUdGc /tmp/tmp.rZLYBqTZcO +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Oayk0LK9Fm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g2JqcN9irS +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Oayk0LK9Fm +++ cat /tmp/tmp.g2JqcN9irS +++ rm /tmp/tmp.Oayk0LK9Fm /tmp/tmp.g2JqcN9irS +++ return 0 ++ local ip=104.154.153.113 ++ '[' -n 104.154.153.113 -a 104.154.153.113 '!=' null ']' ++ echo 104.154.153.113 ++ return + local endpoint=104.154.153.113 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@104.154.153.113/v0/qan/GetReport + jq '.rows[].sparkline' [ { "time_frame": 360, "timestamp": 
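Both metric checks above share one shape: resolve the PMM server's LoadBalancer address (the helper tries .hostname first and falls back to .ip, which is what the null above is about), then call the API as admin:admin. The node metric goes through the Grafana datasource proxy as a Prometheus range query, and the QAN check POSTs a JSON payload to /v0/qan/GetReport. A condensed sketch; the real query URL-encodes a min(...) expression filtered by node_name, and payload.json is built from a template just before the POST:

endpoint=$(kubectl get service/monitoring-service -o json \
  | jq -r '.status.loadBalancer.ingress[].ip')   # 104.154.153.113 in this run
start=$(date -u +%s -d '-1 minute'); end=$(date -u +%s)
curl -s -k "https://admin:admin@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range?query=node_boot_time_seconds&start=${start}&end=${end}&step=60" \
  | jq '.data.result[0].values[][1]'
curl -s -k -XPOST -d @payload.json "https://admin:admin@${endpoint}/v0/qan/GetReport" \
  | jq '.rows[].sparkline'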
"2024-05-24T20:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2024-05-24T20:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2024-05-24T20:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2024-05-24T20:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2024-05-24T20:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2024-05-24T20:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2024-05-24T20:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2024-05-24T19:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2024-05-24T19:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2024-05-24T19:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2024-05-24T19:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2024-05-24T19:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2024-05-24T19:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2024-05-24T19:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2024-05-24T19:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2024-05-24T19:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2024-05-24T19:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2024-05-24T18:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2024-05-24T18:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2024-05-24T18:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2024-05-24T18:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2024-05-24T18:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2024-05-24T18:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2024-05-24T18:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2024-05-24T18:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2024-05-24T18:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2024-05-24T18:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2024-05-24T17:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2024-05-24T17:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2024-05-24T17:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2024-05-24T17:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2024-05-24T17:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2024-05-24T17:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2024-05-24T17:22:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2024-05-24T17:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2024-05-24T17:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2024-05-24T17:04:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2024-05-24T16:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2024-05-24T16:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2024-05-24T16:46:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2024-05-24T16:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2024-05-24T16:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2024-05-24T16:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2024-05-24T16:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2024-05-24T16:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2024-05-24T16:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2024-05-24T16:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2024-05-24T15:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2024-05-24T15:52:00Z" }, { "point": 49, "time_frame": 360, 
"timestamp": "2024-05-24T15:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2024-05-24T15:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2024-05-24T15:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2024-05-24T15:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2024-05-24T15:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2024-05-24T15:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2024-05-24T15:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2024-05-24T15:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2024-05-24T14:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2024-05-24T14:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2024-05-24T14:46:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2024-05-24T14:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2024-05-24T14:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2024-05-24T14:28:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2024-05-24T14:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2024-05-24T14:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2024-05-24T14:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2024-05-24T14:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2024-05-24T13:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2024-05-24T13:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2024-05-24T13:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2024-05-24T13:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2024-05-24T13:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2024-05-24T13:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2024-05-24T13:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2024-05-24T13:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2024-05-24T13:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2024-05-24T13:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2024-05-24T12:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2024-05-24T12:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2024-05-24T12:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2024-05-24T12:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2024-05-24T12:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2024-05-24T12:28:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2024-05-24T12:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2024-05-24T12:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2024-05-24T12:10:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2024-05-24T12:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2024-05-24T11:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2024-05-24T11:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2024-05-24T11:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2024-05-24T11:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2024-05-24T11:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2024-05-24T11:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2024-05-24T11:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2024-05-24T11:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2024-05-24T11:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2024-05-24T11:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2024-05-24T10:58:00Z" }, { "point": 
98, "time_frame": 360, "timestamp": "2024-05-24T10:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2024-05-24T10:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2024-05-24T10:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2024-05-24T10:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2024-05-24T10:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2024-05-24T10:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2024-05-24T10:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2024-05-24T10:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2024-05-24T10:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2024-05-24T09:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2024-05-24T09:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2024-05-24T09:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2024-05-24T09:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2024-05-24T09:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2024-05-24T09:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2024-05-24T09:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2024-05-24T09:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2024-05-24T09:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2024-05-24T09:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2024-05-24T08:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2024-05-24T08:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2024-05-24T08:46:00Z" } ] + rm -f payload.json + [[ -n '' ]] + helm delete monitoring release "monitoring" uninstalled + destroy monitoring-7567 + grep -v level=debug + grep -v level=info + grep -v 'Getting tasks for pod' ++ get_operator_pod + sort -u + grep -v 'the object has been modified' + grep -v 'Getting pods from source' ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + tee /tmp/tmp.9Jeg1znX05/operator.log + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' +++ mktemp ++ local LAST_OUT=/tmp/tmp.56wXnai7bR +++ mktemp ++ local LAST_ERR=/tmp/tmp.9dUqG0WMJq ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.56wXnai7bR ++ cat /tmp/tmp.9dUqG0WMJq ++ rm /tmp/tmp.56wXnai7bR /tmp/tmp.9dUqG0WMJq ++ return 0 + kubectl_bin logs postgres-operator-85ccdd58dc-92prj -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.QJlbuQ2aSz ++ mktemp + local LAST_ERR=/tmp/tmp.Jha40F2OFG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl logs postgres-operator-85ccdd58dc-92prj -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.QJlbuQ2aSz + cat /tmp/tmp.Jha40F2OFG + rm /tmp/tmp.QJlbuQ2aSz /tmp/tmp.Jha40F2OFG + return 0 ] { } }, ] } }, { } }, { } ] } } ] } { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ], { } }, }], { } }, }], } ], } }, } }, } }, }, { } }, }, 2024/05/24 20:37:26 INF 1 (localhost:4150) connecting to nsqd 2024/05/24 20:38:16 INF 2 (localhost:4150) connecting to nsqd 2024/05/24 20:38:17 INF 3 (localhost:4150) connecting to nsqd 2024/05/24 
20:38:25 INF 4 (localhost:4150) connecting to nsqd 2024/05/24 20:38:51 INF 5 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "some-name-pgbr-repo" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "2", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "defaultMode": 288 "defaultMode": 420, "deployment-name": "some-name" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0524 20:36:22.340369 1 shared_informer.go:240] Waiting for caches to sync for namespace I0524 20:36:22.441015 1 shared_informer.go:247] Caches are synced for namespace I0524 20:36:22.533969 1 shared_informer.go:240] Waiting for caches to sync for monitoring-7567 I0524 20:36:22.734935 1 shared_informer.go:247] Caches are synced for monitoring-7567 "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "limits": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "48Mi" "memory": "512Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "name": "backrest", "name": "backrest-backup-some-name", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": 
"MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", , 
{"name":"pmm-client","image":"perconalab/pmm-client:2.41.2","ports":[{"containerPort":7777,"protocol":"TCP"},{"containerPort":30100},{"containerPort":30101},{"containerPort":30102},{"containerPort":30103},{"containerPort":30104},{"containerPort":30105}],"env":[{"name":"PMM_USER","value":"admin"},{"name":"PMM_SERVER","value":"monitoring-service"},{"name":"CLIENT_PORT_LISTEN","value":"7777"},{"name":"CLIENT_PORT_MIN","value":"30100"},{"name":"CLIENT_PORT_MAX","value":"30105"},{"name":"POD_NAME","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.name"}}},{"name":"POD_NAMESPASE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}},{"name":"PMM_AGENT_SERVER_ADDRESS","value":"monitoring-service"},{"name":"PMM_AGENT_SERVER_USERNAME","value":"admin"},{"name":"PMM_AGENT_SERVER_PASSWORD","valueFrom":{"secretKeyRef":{"name":"some-name-pmm-secret","key":"password"}}},{"name":"PMM_AGENT_LISTEN_PORT","value":"7777"},{"name":"PMM_AGENT_PORTS_MIN","value":"30100"},{"name":"PMM_AGENT_PORTS_MAX","value":"30105"},{"name":"PMM_AGENT_CONFIG_FILE","value":"/usr/local/percona/pmm2/config/pmm-agent.yaml"},{"name":"PMM_AGENT_SERVER_INSECURE_TLS","value":"1"},{"name":"PMM_AGENT_LISTEN_ADDRESS","value":"0.0.0.0"},{"name":"PMM_AGENT_SETUP_NODE_NAME","value":"some-name"},{"name":"PMM_AGENT_SETUP_METRICS_MODE","value":"push"},{"name":"PMM_AGENT_SETUP","value":"1"},{"name":"PMM_AGENT_SETUP_FORCE","value":"1"},{"name":"PMM_AGENT_SETUP_NODE_TYPE","value":"container"},{"name":"DB_TYPE","value":"postgresql"},{"name":"PMM_AGENT_SIDECAR","value":"true"},{"name":"PMM_AGENT_SIDECAR_SLEEP","value":"5"},{"name":"DB_PASS","valueFrom":{"secretKeyRef":{"name":"some-name-postgres-secret","key":"password"}}},{"name":"PMM_AGENT_PATHS_TEMPDIR","value":"/tmp"},{"name":"PMM_AGENT_PRERUN_SCRIPT","value":"pmm-admin status --wait=10s;\npmm-admin add postgresql --skip-connection-check --metrics-mode=push --username=postgres --password=$(DB_PASS) --service-name=$(PMM_AGENT_SETUP_NODE_NAME) --host=$(POD_NAME) --port=5432 --query-source=pgstatmonitor;\npmm-admin annotate --service-name=$(PMM_AGENT_SETUP_NODE_NAME) 'Service restarted'"}],"resources":{},"livenessProbe":{"httpGet":{"path":"/local/Status","port":7777,"scheme":"HTTP"},"initialDelaySeconds":60,"timeoutSeconds":5,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"lifecycle":{"preStop":{"exec":{"command":["bash","-c","pmm-admin inventory remove node --force $(pmm-admin status --json | python -c \"import sys, json; print(json.load(sys.stdin)['pmm_agent_status']['node_id'])\")"]}}},"imagePullPolicy":"IfNotPresent"}], "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "report", "name": "root-volume" "name": "root-volume", "name": "some-name", "name": "some-name", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo" "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer-cm" "name": "some-name-pgbouncer-secret", "name": "some-name-pgbouncer-secret", "name": "some-name-pgbr-repo", "name": "some-name","pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.7.0" "name": "some-name-pgha-config", "name": "some-name-pgha-config", "name": 
"some-name-stanza-create", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"some-name"} "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgo-version": "1.7.0","name": "some-name","pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name" "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 2022, "ports": [{ "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-postgres-secret" "secretName": "some-name-primaryuser-secret" "secretName": "some-name-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "some-name" "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1G" "strategy": { "targetPort": 2022, "template": { time="2024-05-24T20:36:21Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" 
file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.7.0 time="2024-05-24T20:38:16Z" level=error msg="Error completing workflow some-name-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.7.0 time="2024-05-24T20:38:16Z" level=error msg="pgtasks.pg.percona.com \"some-name-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.7.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "backup" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.87.34.83 --db-path=/pgdata/some-name" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/some-name" "value": "/pgdata/some-name" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "some-name" "value": "some-name-backrest-shared-repo" "value": "some-name-backrest-shared-repo-5ccbf9cc67-w4ptg" "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.87.34.83 --db-path=/pgdata/some-name" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.9Jeg1znX05/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.RZ5T8TkExh ++ mktemp + local LAST_ERR=/tmp/tmp.Myt7iVE5lg + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.9Jeg1znX05/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.9Jeg1znX05/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.9Jeg1znX05/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.RZ5T8TkExh + cat /tmp/tmp.Myt7iVE5lg Error from server (NotFound): error when deleting "/tmp/tmp.9Jeg1znX05/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.9Jeg1znX05/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.9Jeg1znX05/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.9Jeg1znX05/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.9Jeg1znX05/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.RZ5T8TkExh /tmp/tmp.Myt7iVE5lg + return 1 + true + yq w -d4 /tmp/tmp.9Jeg1znX05/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.9Jeg1znX05/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.2J7o4RCEc8 ++ mktemp + local LAST_ERR=/tmp/tmp.rJCNwgo7Qb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl apply -f /tmp/tmp.9Jeg1znX05/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.2J7o4RCEc8 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.rJCNwgo7Qb + rm /tmp/tmp.2J7o4RCEc8 /tmp/tmp.rJCNwgo7Qb + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-7567 + retry=0 + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
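The dotted polling above (continuing below until iteration 15 sees success) is wait_job_completion: once per second, up to 600 tries, it checks that the job still exists and that .status.succeeded has reached 1. Reduced to its core, skipping the existence / "not found" check the real helper also performs:

retry=0
while [[ $(kubectl -n monitoring-7567 get job/pgo-deploy \
        -o 'jsonpath={.status.succeeded}') != 1 ]]; do
    sleep 1
    echo -n .
    (( ++retry < 600 )) || { echo 'timeout waiting for job/pgo-deploy' >&2; exit 1; }
done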
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-7567 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.9Jeg1znX05/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.aD60TX29yM ++ mktemp + local LAST_ERR=/tmp/tmp.lU8fMJxsUx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete -f /tmp/tmp.9Jeg1znX05/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.aD60TX29yM serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.lU8fMJxsUx + rm /tmp/tmp.aD60TX29yM /tmp/tmp.lU8fMJxsUx + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' ++ mktemp + xargs kubectl delete ns --grace-period=0 --force=true + local LAST_OUT=/tmp/tmp.MVs7NpaTHr ++ mktemp + local LAST_ERR=/tmp/tmp.fQIQbJ6w0J + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.MVs7NpaTHr + cat /tmp/tmp.fQIQbJ6w0J + rm /tmp/tmp.MVs7NpaTHr /tmp/tmp.fQIQbJ6w0J + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
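The warning above comes from the namespace sweep: every non-system namespace is force-deleted with a zero grace period, which kubectl flags as potentially leaving resources running. Condensed from the trace:

kubectl get ns \
  | egrep -v '^kube-|^default|Terminating|openshift|^NAME' \
  | awk '{print $1}' \
  | xargs kubectl delete ns --grace-period=0 --force=true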
namespace "monitoring-7567" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.OS9pXEMYPD ++ mktemp + local LAST_ERR=/tmp/tmp.27k1RzJFxG + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.OS9pXEMYPD + cat /tmp/tmp.27k1RzJFxG Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.OS9pXEMYPD /tmp/tmp.27k1RzJFxG + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.AgbG0ZzIYq ++ mktemp + local LAST_ERR=/tmp/tmp.Sk2g01LhsM + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-776-0087ef129-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.AgbG0ZzIYq + cat /tmp/tmp.Sk2g01LhsM Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.AgbG0ZzIYq /tmp/tmp.Sk2g01LhsM + return 1 + true + rm -rf /tmp/tmp.9Jeg1znX05