++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/monitoring.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/logs/monitoring.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + create_namespace monitoring-24877 + local namespace=monitoring-24877 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + xargs kubectl delete ns + awk '{print$1}' + local LAST_OUT=/tmp/tmp.BaptaCNX99 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.iyVMu4kHKj + local LAST_ERR=/tmp/tmp.Ny9gYyrIX0 + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.L3hsZOEsum + local exit_status=0 + local timeout=4 + set +o errexit + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BaptaCNX99 + cat /tmp/tmp.Ny9gYyrIX0 + rm /tmp/tmp.BaptaCNX99 /tmp/tmp.Ny9gYyrIX0 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.iyVMu4kHKj + cat /tmp/tmp.L3hsZOEsum Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.iyVMu4kHKj /tmp/tmp.L3hsZOEsum + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.OaMWJV6bOC ++ mktemp + local LAST_ERR=/tmp/tmp.FLJVBPsc0E + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 
0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.OaMWJV6bOC + cat /tmp/tmp.FLJVBPsc0E Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.OaMWJV6bOC /tmp/tmp.FLJVBPsc0E + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace monitoring-24877 ++ mktemp + local LAST_OUT=/tmp/tmp.1wpTsTtag6 ++ mktemp + local LAST_ERR=/tmp/tmp.IcfO9lhGob + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace monitoring-24877 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace monitoring-24877 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete namespace monitoring-24877 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.1wpTsTtag6 + cat /tmp/tmp.IcfO9lhGob Error from server (NotFound): namespaces "monitoring-24877" not found + rm /tmp/tmp.1wpTsTtag6 /tmp/tmp.IcfO9lhGob + return 1 + : + wait_for_delete namespace/monitoring-24877 + local res=namespace/monitoring-24877 + set +o xtrace namespace/monitoring-24877 - Error from server (NotFound): namespaces "monitoring-24877" not found + kubectl_bin create namespace monitoring-24877 ++ mktemp + local LAST_OUT=/tmp/tmp.yYudOLS465 ++ mktemp + local LAST_ERR=/tmp/tmp.QU20ic62Q6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl create namespace monitoring-24877 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.yYudOLS465 namespace/monitoring-24877 created + cat /tmp/tmp.QU20ic62Q6 + rm 
/tmp/tmp.yYudOLS465 /tmp/tmp.QU20ic62Q6 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.1vxcgdAM5K +++ mktemp ++ local LAST_ERR=/tmp/tmp.rfcmSo1heB ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.1vxcgdAM5K ++ cat /tmp/tmp.rfcmSo1heB ++ rm /tmp/tmp.1vxcgdAM5K /tmp/tmp.rfcmSo1heB ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox --namespace=monitoring-24877 ++ mktemp + local LAST_OUT=/tmp/tmp.M8phz6pZgm ++ mktemp + local LAST_ERR=/tmp/tmp.nQ7Hhid7Ct + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox --namespace=monitoring-24877 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.M8phz6pZgm Context "gke_cloud-dev-112233_us-central1-c_jen-pg-721-a67b3475b-2-sandbox" modified. + cat /tmp/tmp.nQ7Hhid7Ct + rm /tmp/tmp.M8phz6pZgm /tmp/tmp.nQ7Hhid7Ct + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=monitoring-24877 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml disabled install monitoring-24877 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=monitoring-24877 + local dont_send_telemetry=false + /usr/bin/sed -e 's#^namespace: .*#namespace: "monitoring-24877"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "monitoring-24877"#g' + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml 'data[values.yaml]' + cat /dev/fd/63 - + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' ++ echo 'crunchy_debug: "true"' ++ cut -d: -f2 ++ cut -d: -f1 ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b ++ echo perconalab/percona-postgresql-operator:PR-721-a67b3475b + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-721-a67b3475b"#g' + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w -d3 - 'subjects[0].namespace' monitoring-24877 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-721-a67b3475b-pgo-deployer + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/operator.yaml metadata.namespace monitoring-24877 + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install ++ cat /tmp/tmp.6t4QFNzt7D/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" 
ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "monitoring-24877" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-721-a67b3475b" pgo_operator_namespace: "monitoring-24877"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.6t4QFNzt7D/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.BVLmquJJuz ++ mktemp + local LAST_ERR=/tmp/tmp.inGS9MUv6u + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.6t4QFNzt7D/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.BVLmquJJuz serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.inGS9MUv6u + rm /tmp/tmp.BVLmquJJuz /tmp/tmp.inGS9MUv6u + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-24877 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ [trace condensed: retries 6 through 23 repeat the identical pair of checks (job/pgo-deploy still present; '{.status.succeeded}' still empty), each iteration ending with sleep 1 and a printed '.'] + sleep 1 + echo -n . 
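The dots being printed come from wait_job_completion, which polls the Job once per second and gives up after 600 iterations (about 10 minutes); the real helper also separately confirms the Job still exists by grepping kubectl's output for 'not found'. A condensed sketch of the loop, assuming the names seen in the trace:

wait_job_completion() {
    local job=$1 ns=$2 retry=0
    until [[ $(kubectl -n "$ns" get "job/$job" \
            -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
        sleep 1
        echo -n .
        let retry+=1
        if [ "$retry" -ge 600 ]; then    # same 600-iteration ceiling as the trace
            echo "job/$job did not complete" >&2
            return 1
        fi
    done
    echo
}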
.+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.HQ9WoWsvKw ++ mktemp + local LAST_ERR=/tmp/tmp.1xqpfHCehs + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.HQ9WoWsvKw serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.1xqpfHCehs + rm /tmp/tmp.HQ9WoWsvKw /tmp/tmp.1xqpfHCehs + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MykESaiTT7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XdrbAAOS6A ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.MykESaiTT7 ++ cat /tmp/tmp.XdrbAAOS6A ++ rm /tmp/tmp.MykESaiTT7 /tmp/tmp.XdrbAAOS6A ++ return 0 + wait_pod postgres-operator-b85fdf48b-8jnsw + local pod=postgres-operator-b85fdf48b-8jnsw + local ns= + set +o xtrace postgres-operator-b85fdf48b-8jnswtrue + [[ -n '' ]] + deploy_helm monitoring-24877 + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + helm repo remove percona "percona" has been removed from your repositories + helm repo add percona https://percona-charts.storage.googleapis.com/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. 
⎈Happy Helming!⎈ + desc 'create first PGO cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PGO cluster ----------------------------------------------------------------------------------- + cluster=some-name + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + platform=kubernetes + helm del --purge monitoring Error: unknown flag: --purge + : + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server --set platform=kubernetes https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Tue Apr 23 16:26:40 2024 NAMESPACE: monitoring-24877 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-24877.svc.cluster.local:443 login: admin password: admin + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.RYXr8zh8Td ++ mktemp + local LAST_ERR=/tmp/tmp.jmbCQ2vFLb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.RYXr8zh8Td + cat /tmp/tmp.jmbCQ2vFLb error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.RYXr8zh8Td /tmp/tmp.jmbCQ2vFLb + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.wSfZGMMCCQ ++ mktemp + local LAST_ERR=/tmp/tmp.j8LbNWftuP + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.wSfZGMMCCQ + cat /tmp/tmp.j8LbNWftuP error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.wSfZGMMCCQ /tmp/tmp.j8LbNWftuP + return 1 + echo 'Retry 1' Retry 1 + sleep 5 + let retry+=1 + '[' 2 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| 
grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.pB3okN8770 ++ mktemp + local LAST_ERR=/tmp/tmp.YVgzKvbLMT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.pB3okN8770 + cat /tmp/tmp.YVgzKvbLMT error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.pB3okN8770 /tmp/tmp.YVgzKvbLMT + return 1 + echo 'Retry 2' Retry 2 + sleep 5 + let retry+=1 + '[' 3 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.Sc07yDehkF ++ mktemp + local LAST_ERR=/tmp/tmp.oOP88yCgf8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Sc07yDehkF + cat /tmp/tmp.oOP88yCgf8 + rm /tmp/tmp.Sc07yDehkF /tmp/tmp.oOP88yCgf8 + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IXLUZ323DS +++ mktemp ++ local LAST_ERR=/tmp/tmp.C18ZWiuCPg ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.IXLUZ323DS ++ cat /tmp/tmp.C18ZWiuCPg ++ rm /tmp/tmp.IXLUZ323DS /tmp/tmp.C18ZWiuCPg ++ return 0 + ADMIN_PASSWORD=admin + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.lS9t3IIvhq ++ mktemp + local LAST_ERR=/tmp/tmp.8gjR7dcVTt + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.lS9t3IIvhq logger=settings t=2024-04-23T16:27:46.677142626Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2024-04-23T16:27:46.677351742Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2024-04-23T16:27:46.67738234Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2024-04-23T16:27:46.677395852Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2024-04-23T16:27:46.67740552Z level=info msg="Path Data" path=/srv/grafana logger=settings 
t=2024-04-23T16:27:46.677413571Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2024-04-23T16:27:46.677420608Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2024-04-23T16:27:46.677431522Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2024-04-23T16:27:46.677440242Z level=info msg="App mode production" logger=sqlstore t=2024-04-23T16:27:46.677555812Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2024-04-23T16:27:46.70709387Z level=info msg="Starting DB migrations" logger=migrator t=2024-04-23T16:27:46.713420462Z level=info msg="migrations completed" performed=0 skipped=452 duration=665.755µs logger=secrets t=2024-04-23T16:27:46.7159844Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2024-04-23T16:27:46.762477912Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2024-04-23T16:27:47.033620342Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2024-04-23T16:27:47.03367494Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2024-04-23T16:27:47.033705413Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2024-04-23T16:27:47.056410064Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2024-04-23T16:27:47.056901449Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2024-04-23T16:27:47.056967134Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.loader t=2024-04-23T16:27:47.057130194Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2024-04-23T16:27:47.057150128Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2024-04-23T16:27:47.057160009Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2024-04-23T16:27:47.057171128Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2024-04-23T16:27:47.057180826Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2024-04-23T16:27:47.057188897Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2024-04-23T16:27:47.057199277Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2024-04-23T16:27:47.057208788Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2024-04-23T16:27:47.075181802Z level=warn msg="Plugin process is running with elevated privileges. 
This is not recommended" logger=plugin.loader t=2024-04-23T16:27:47.075227355Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2024-04-23T16:27:47.075243121Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2024-04-23T16:27:47.075253417Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2024-04-23T16:27:47.075261252Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel Admin password changed successfully ✔ + cat /tmp/tmp.8gjR7dcVTt + rm /tmp/tmp.lS9t3IIvhq /tmp/tmp.8gjR7dcVTt + return 0 + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + local namespace=monitoring-24877 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name monitoring-24877 + local cluster=some-name + local namespace=monitoring-24877 + yq w - metadata.name some-name-users-secret + yq w - stringData.some-name some-name_pass + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n monitoring-24877 apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wJkjFG11jb ++ mktemp + local LAST_ERR=/tmp/tmp.KXB2rANqtL + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wJkjFG11jb secret/some-name-users-secret created + cat /tmp/tmp.KXB2rANqtL + rm /tmp/tmp.wJkjFG11jb /tmp/tmp.KXB2rANqtL + return 0 + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-721/deploy/users-secret.yaml + kubectl_bin -n monitoring-24877 apply -f - + yq w - metadata.name some-name-pmm-secret ++ mktemp + local LAST_OUT=/tmp/tmp.dxlzxEfjk7 ++ mktemp + local LAST_ERR=/tmp/tmp.hCBnIbYMj8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.dxlzxEfjk7 secret/some-name-pmm-secret created + cat /tmp/tmp.hCBnIbYMj8 + rm /tmp/tmp.dxlzxEfjk7 /tmp/tmp.hCBnIbYMj8 + return 0 + kubectl_bin -n monitoring-24877 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.5ucL9830U1 ++ mktemp + local LAST_ERR=/tmp/tmp.rPvMOGMmhZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.5ucL9830U1 deployment.apps/pg-client created + cat /tmp/tmp.rPvMOGMmhZ + rm /tmp/tmp.5ucL9830U1 
/tmp/tmp.rPvMOGMmhZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - metadata.labels.name some-name + yq w - metadata.labels.pg-cluster some-name + yq w - spec.keepBackups false + yq w - metadata.labels.deployment-name some-name + yq w - spec.keepData false + yq w - spec.clustername some-name + yq w - spec.database some-name + yq w - metadata.name some-name + yq w - spec.user some-name + yq w - spec.secretsName some-name-users-secret + yq w - spec.name some-name + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.6t4QFNzt7D/cr.yaml -n monitoring-24877 ++ mktemp + local LAST_OUT=/tmp/tmp.nYkZ2EKzCX ++ mktemp + local LAST_ERR=/tmp/tmp.8fUpNaQ2dk + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.6t4QFNzt7D/cr.yaml -n monitoring-24877 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.nYkZ2EKzCX perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.8fUpNaQ2dk + rm /tmp/tmp.nYkZ2EKzCX /tmp/tmp.8fUpNaQ2dk + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=monitoring-24877 + sleep 10 + set +o xtrace some-name-backrest-shared-repo........ + wait_deployment some-name + local name=some-name + local target_namespace=monitoring-24877 + sleep 10 + set +o xtrace some-name......... + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=monitoring-24877 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
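The apply_cluster step above builds the final custom resource by chaining yq v3 write (`yq w`) operations over the test's cr.yaml, writing the result to a temp file, and applying it. The same idea in condensed form (yq v3 syntax; yq v4 would use `yq eval '.a = "b"'` instead):

yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/conf/cr.yaml metadata.name some-name \
    | yq w - spec.clustername some-name \
    | yq w - spec.secretsName some-name-users-secret \
    | yq w - spec.pmm.image perconalab/pmm-client:2.41.0 \
    | kubectl apply -n monitoring-24877 -f -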
.+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=monitoring-24877 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
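For reference, the hand-rolled polling here (first job/some-name-stanza-create, now job/backrest-backup-some-name) could also be expressed with kubectl's built-in waiter, which blocks on the Job's Complete condition. A hedged alternative, not what the suite actually runs:

kubectl -n monitoring-24877 wait --for=condition=complete \
    job/some-name-stanza-create --timeout=600s
kubectl -n monitoring-24877 wait --for=condition=complete \
    job/backrest-backup-some-name --timeout=600s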
.+ [trace condensed: retries 7 through 14 repeat the identical checks against job/backrest-backup-some-name, each iteration ending with sleep 1 and a printed '.'] + sleep 1 + echo -n . 
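Once the backup job below reports .status.succeeded=1, the harness moves on to wait_cluster_consistency, which polls the custom resource itself until .status.PGCluster.state reads 'pgcluster Initialized', up to 24 tries at 10s intervals. A minimal sketch of that status poll, using the names from the trace:

retry=0
until [[ $(kubectl -n monitoring-24877 get perconapgcluster/some-name \
        -o 'jsonpath={.status.PGCluster.state}') == 'pgcluster Initialized' ]]; do
    echo 'waiting for cluster readyness'    # message spelled as in the trace
    sleep 10
    let retry+=1
    [ "$retry" -ge 24 ] && exit 1           # ceiling seen in the trace
done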
.+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=monitoring-24877 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7PhQQmEzEN +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mq462jEQAU ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.7PhQQmEzEN ++ cat /tmp/tmp.Mq462jEQAU ++ rm /tmp/tmp.7PhQQmEzEN /tmp/tmp.Mq462jEQAU ++ return 0 + status_query='kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xYcjK6lJc6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wAWCufix3N +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.xYcjK6lJc6 +++ cat /tmp/tmp.wAWCufix3N +++ rm /tmp/tmp.xYcjK6lJc6 /tmp/tmp.wAWCufix3N +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ShMnm0h2A9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CPqUlcJtqg +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.ShMnm0h2A9 +++ cat /tmp/tmp.CPqUlcJtqg +++ rm /tmp/tmp.ShMnm0h2A9 /tmp/tmp.CPqUlcJtqg +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local 
namespace=monitoring-24877 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o5DOtnfamc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vOrTcwyVno +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.o5DOtnfamc +++ cat /tmp/tmp.vOrTcwyVno +++ rm /tmp/tmp.o5DOtnfamc /tmp/tmp.vOrTcwyVno +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bwEEW70no8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nldCZYjPG4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.bwEEW70no8 +++ cat /tmp/tmp.nldCZYjPG4 +++ rm /tmp/tmp.bwEEW70no8 /tmp/tmp.nldCZYjPG4 +++ return 0 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9vBlB8Q2sV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TC5LLQdde6 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.9vBlB8Q2sV +++ cat /tmp/tmp.TC5LLQdde6 +++ rm /tmp/tmp.9vBlB8Q2sV /tmp/tmp.TC5LLQdde6 +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dL15wm6K38 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1u3cu3raPZ ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.dL15wm6K38 ++++ cat /tmp/tmp.1u3cu3raPZ ++++ rm /tmp/tmp.dL15wm6K38 /tmp/tmp.1u3cu3raPZ ++++ return 0 +++ kubectl_bin -n monitoring-24877 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fKnSVOVIMn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wewKqCjqCV +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.fKnSVOVIMn +++ cat /tmp/tmp.wewKqCjqCV +++ rm /tmp/tmp.fKnSVOVIMn /tmp/tmp.wewKqCjqCV +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' 
postgres:postgres_password@some-name.monitoring-24877 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.monitoring-24877 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XX8gxlX4MH +++ mktemp ++ local LAST_ERR=/tmp/tmp.gMojwQNDkf ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.XX8gxlX4MH ++ cat /tmp/tmp.gMojwQNDkf ++ rm /tmp/tmp.XX8gxlX4MH /tmp/tmp.gMojwQNDkf ++ return 0 + local client_container=pg-client-76df9df876-rn5bw + kubectl_bin exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.3g8IHkJouN ++ mktemp + local LAST_ERR=/tmp/tmp.YI68S6Y2EZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.3g8IHkJouN + cat /tmp/tmp.YI68S6Y2EZ + rm /tmp/tmp.3g8IHkJouN /tmp/tmp.YI68S6Y2EZ + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-24877 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YSFivbGDdC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pL2qKbcFpZ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.YSFivbGDdC +++ cat /tmp/tmp.pL2qKbcFpZ +++ rm /tmp/tmp.YSFivbGDdC /tmp/tmp.pL2qKbcFpZ +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XrfNFTT7oZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LVZg5EenBt +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.XrfNFTT7oZ +++ cat /tmp/tmp.LVZg5EenBt +++ rm /tmp/tmp.XrfNFTT7oZ /tmp/tmp.LVZg5EenBt +++ return 0 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 
'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dfYFEKDkWH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MPmFIhF3kg +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.dfYFEKDkWH +++ cat /tmp/tmp.MPmFIhF3kg +++ rm /tmp/tmp.dfYFEKDkWH /tmp/tmp.MPmFIhF3kg +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.rRCZ5Ogop6 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SO0rfsfVtG ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.rRCZ5Ogop6 ++++ cat /tmp/tmp.SO0rfsfVtG ++++ rm /tmp/tmp.rRCZ5Ogop6 /tmp/tmp.SO0rfsfVtG ++++ return 0 +++ kubectl_bin -n monitoring-24877 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iw3NI7BCEO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2g9alNPNL3 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.iw3NI7BCEO +++ cat /tmp/tmp.2g9alNPNL3 +++ rm /tmp/tmp.iw3NI7BCEO /tmp/tmp.2g9alNPNL3 +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.monitoring-24877 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.monitoring-24877 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fDKJWFr0r5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8pfmDRUU9v ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.fDKJWFr0r5 ++ cat /tmp/tmp.8pfmDRUU9v ++ rm /tmp/tmp.fDKJWFr0r5 /tmp/tmp.8pfmDRUU9v ++ return 0 + local client_container=pg-client-76df9df876-rn5bw + kubectl_bin exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.Cr9C8Rwu6P ++ mktemp + local LAST_ERR=/tmp/tmp.T7YMwWXiA8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES 
(100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Cr9C8Rwu6P + cat /tmp/tmp.T7YMwWXiA8 + rm /tmp/tmp.Cr9C8Rwu6P /tmp/tmp.T7YMwWXiA8 + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-24877 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qvDWH55dQe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AHaTdCN0wI +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.qvDWH55dQe +++ cat /tmp/tmp.AHaTdCN0wI +++ rm /tmp/tmp.qvDWH55dQe /tmp/tmp.AHaTdCN0wI +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3dLWSeGP87 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RAhqBmDP3A +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.3dLWSeGP87 +++ cat /tmp/tmp.RAhqBmDP3A +++ rm /tmp/tmp.3dLWSeGP87 /tmp/tmp.RAhqBmDP3A +++ return 0 +++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.a1oOLO1o94 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mrCRM8jWPM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.a1oOLO1o94 +++ cat /tmp/tmp.mrCRM8jWPM +++ rm /tmp/tmp.a1oOLO1o94 /tmp/tmp.mrCRM8jWPM +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.l35iDCUl6D +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.q1W3jRGTJv ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++++ kubectl -n monitoring-24877 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.l35iDCUl6D ++++ cat /tmp/tmp.q1W3jRGTJv ++++ rm /tmp/tmp.l35iDCUl6D /tmp/tmp.q1W3jRGTJv ++++ return 0 +++ kubectl_bin -n monitoring-24877 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QXbQDcSx67 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.56MGbZihMA +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl -n monitoring-24877 get 
secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.QXbQDcSx67 +++ cat /tmp/tmp.56MGbZihMA +++ rm /tmp/tmp.QXbQDcSx67 /tmp/tmp.56MGbZihMA +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.monitoring-24877 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.monitoring-24877 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.slrlC2i6qt +++ mktemp ++ local LAST_ERR=/tmp/tmp.fTllXG008u ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl -n monitoring-24877 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.slrlC2i6qt ++ cat /tmp/tmp.fTllXG008u ++ rm /tmp/tmp.slrlC2i6qt /tmp/tmp.fTllXG008u ++ return 0 + local client_container=pg-client-76df9df876-rn5bw + kubectl_bin exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.5ynciKekHn ++ mktemp + local LAST_ERR=/tmp/tmp.fpyRTStNmT + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl exec pg-client-76df9df876-rn5bw -n monitoring-24877 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-24877.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.5ynciKekHn + cat /tmp/tmp.fpyRTStNmT + rm /tmp/tmp.5ynciKekHn /tmp/tmp.fpyRTStNmT + return 0 + sleep 10 + desc 'compare deployment' + set +o xtrace ----------------------------------------------------------------------------------- compare deployment ----------------------------------------------------------------------------------- + compare_kubectl deployment/some-name + local resource=deployment/some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/compare/deployment_some-name.yml + local new_result=/tmp/tmp.6t4QFNzt7D/deployment_some-name.yml + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name + yq d - metadata.managedFields + yq d - '**.creationTimestamp' + yq d - '**.namespace' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - metadata.selfLink + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.creationTimestamp' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**.dataSource' + yq d - '**.image' + yq d - '**.volumeName' + yq d - '**.clusterIP' + yq d - 
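# [annotation] compare_kubectl, condensed: the live object is dumped as YAML and
# every cluster-specific or non-deterministic field is deleted with yq v3's
# "yq d" syntax before diffing against the checked-in expectation. The filter
# list below is abridged (the trace applies several dozen deletions), and
# TMPDIR stands in for the run's mktemp scratch dir (/tmp/tmp.6t4QFNzt7D here).
kubectl get -o yaml deployment/some-name \
    | yq d - metadata.managedFields \
    | yq d - '**.creationTimestamp' \
    | yq d - '**.uid' \
    | yq d - metadata.resourceVersion \
    | yq d - status \
    > "$TMPDIR/deployment_some-name.yml"
diff -u e2e-tests/monitoring/compare/deployment_some-name.yml \
    "$TMPDIR/deployment_some-name.yml"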
'**.storageClassName' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - metadata.deletionTimestamp + yq d - '**."percona.com/*"' + yq d - spec.nodeName + yq d - '**.clusterIPs' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.nodePort' + yq d - '**.healthCheckNodePort' + yq d - '**.procMount' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.imagePullSecrets' + yq d - '**.finalizers' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.enableServiceLinks' ++ mktemp + yq d - '**.controller-uid' + yq d - status + local LAST_OUT=/tmp/tmp.K7hr6Vi6Bk + yq d - '**.(name==NAMESPACE)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.(name==COMMAND_OPTS)' + yq d - metadata.resourceVersion + yq d - '**."volume.kubernetes.io/storage-provisioner"' ++ mktemp + yq d - spec.volumeMode + local LAST_ERR=/tmp/tmp.NZoYkxAhfj + local exit_status=0 + local timeout=4 + set +o errexit + yq d - '**.(name==suffix)' + yq d - '**.(name==PODNAME)' + yq d - '**.ipFamilies' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.ipFamilyPolicy' + yq d - '**.preemptionPolicy' ++ seq 0 2 + yq d - '**.internalTrafficPolicy' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get -o yaml deployment/some-name + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.uid' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.K7hr6Vi6Bk + cat /tmp/tmp.NZoYkxAhfj + rm /tmp/tmp.K7hr6Vi6Bk /tmp/tmp.NZoYkxAhfj + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.6t4QFNzt7D/deployment_some-name.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-721/e2e-tests/monitoring/compare/deployment_some-name.yml /tmp/tmp.6t4QFNzt7D/deployment_some-name.yml + desc 'check metrics' + set +o xtrace ----------------------------------------------------------------------------------- check metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds some-name admin:admin + local metric=node_boot_time_seconds + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1713889795 ++ /usr/bin/date -u +%s + local end=1713889855 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.di1AkqQ3fs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xy5FnBWO89 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.di1AkqQ3fs +++ cat /tmp/tmp.Xy5FnBWO89 +++ rm /tmp/tmp.di1AkqQ3fs /tmp/tmp.Xy5FnBWO89 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.htS3koTK2Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tVQrjShrBW +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get service/monitoring-service -o 
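# [annotation] get_metric_values, in outline: resolve the monitoring-service
# LoadBalancer endpoint (the suite tries .hostname first and falls back to .ip,
# as seen above), then run a PromQL query_range through Grafana's datasource
# proxy with basic auth. admin:admin and the /graph/api/datasources/proxy/1/
# path are copied from the trace; --data-urlencode replaces the pre-encoded
# query string for readability.
EP=$(kubectl get service/monitoring-service -o json \
    | jq -r '.status.loadBalancer.ingress[0].ip')
START=$(date -u +%s -d '-1 minute'); END=$(date -u +%s)
curl -s -k -G "https://admin:admin@${EP}/graph/api/datasources/proxy/1/api/v1/query_range" \
    --data-urlencode 'query=min(node_boot_time_seconds{node_name=~"some-name"})' \
    --data "start=${START}" --data "end=${END}" --data "step=60" \
    | jq '.data.result[0].values[][1]'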
json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.htS3koTK2Y +++ cat /tmp/tmp.tVQrjShrBW +++ rm /tmp/tmp.htS3koTK2Y /tmp/tmp.tVQrjShrBW +++ return 0 ++ local ip=34.66.44.55 ++ '[' -n 34.66.44.55 -a 34.66.44.55 '!=' null ']' ++ echo 34.66.44.55 ++ return + local endpoint=34.66.44.55 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.66.44.55/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7D%29&start=1713889795&end=1713889855&step=60' "1713878936" "1713878936" + desc 'check qan20 metrics' + set +o xtrace ----------------------------------------------------------------------------------- check qan20 metrics ----------------------------------------------------------------------------------- + get_qan20_values some-name admin:admin + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2024-04-23T16:00:56 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2024-04-23T16:30:56 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hGasIBJg5S ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XSmTdEwjMc +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.hGasIBJg5S +++ cat /tmp/tmp.XSmTdEwjMc +++ rm /tmp/tmp.hGasIBJg5S /tmp/tmp.XSmTdEwjMc +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qin9JyhkR6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XrUJhVJ0ew +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Qin9JyhkR6 +++ cat /tmp/tmp.XrUJhVJ0ew +++ rm /tmp/tmp.Qin9JyhkR6 /tmp/tmp.XrUJhVJ0ew +++ return 0 ++ local ip=34.66.44.55 ++ '[' -n 34.66.44.55 -a 34.66.44.55 '!=' null ']' ++ echo 34.66.44.55 ++ return + local endpoint=34.66.44.55 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@34.66.44.55/v0/qan/GetReport + jq '.rows[].sparkline' [ { "time_frame": 360, "timestamp": "2024-04-23T16:30:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2024-04-23T16:24:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2024-04-23T16:18:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2024-04-23T16:12:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2024-04-23T16:06:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2024-04-23T16:00:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2024-04-23T15:54:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2024-04-23T15:48:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2024-04-23T15:42:00Z" }, { "point": 9, "time_frame": 360, 
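# [annotation] check qan20 metrics, in outline: two timestamps 12 hours apart are
# substituted into payload.json and POSTed to PMM's QAN endpoint; jq then pulls
# each row's sparkline, which is the array printed around this point. The heredoc
# that builds payload.json is not visible in this trace, so its schema is left
# out; EP is the endpoint resolved in the previous sketch.
T_FROM=$(date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
T_TO=$(date -u +%Y-%m-%dT%H:%M:%S%:z)
# payload.json is assumed to embed $T_FROM/$T_TO as the report period.
curl -s -k -XPOST -d @payload.json "https://admin:admin@${EP}/v0/qan/GetReport" \
    | jq '.rows[].sparkline'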
"timestamp": "2024-04-23T15:36:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2024-04-23T15:30:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2024-04-23T15:24:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2024-04-23T15:18:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2024-04-23T15:12:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2024-04-23T15:06:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2024-04-23T15:00:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2024-04-23T14:54:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2024-04-23T14:48:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2024-04-23T14:42:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2024-04-23T14:36:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2024-04-23T14:30:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2024-04-23T14:24:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2024-04-23T14:18:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2024-04-23T14:12:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2024-04-23T14:06:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2024-04-23T14:00:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2024-04-23T13:54:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2024-04-23T13:48:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2024-04-23T13:42:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2024-04-23T13:36:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2024-04-23T13:30:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2024-04-23T13:24:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2024-04-23T13:18:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2024-04-23T13:12:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2024-04-23T13:06:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2024-04-23T13:00:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2024-04-23T12:54:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2024-04-23T12:48:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2024-04-23T12:42:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2024-04-23T12:36:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2024-04-23T12:30:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2024-04-23T12:24:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2024-04-23T12:18:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2024-04-23T12:12:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2024-04-23T12:06:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2024-04-23T12:00:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2024-04-23T11:54:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2024-04-23T11:48:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2024-04-23T11:42:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2024-04-23T11:36:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2024-04-23T11:30:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2024-04-23T11:24:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2024-04-23T11:18:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2024-04-23T11:12:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2024-04-23T11:06:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2024-04-23T11:00:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2024-04-23T10:54:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2024-04-23T10:48:00Z" }, { "point": 
58, "time_frame": 360, "timestamp": "2024-04-23T10:42:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2024-04-23T10:36:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2024-04-23T10:30:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2024-04-23T10:24:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2024-04-23T10:18:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2024-04-23T10:12:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2024-04-23T10:06:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2024-04-23T10:00:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2024-04-23T09:54:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2024-04-23T09:48:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2024-04-23T09:42:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2024-04-23T09:36:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2024-04-23T09:30:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2024-04-23T09:24:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2024-04-23T09:18:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2024-04-23T09:12:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2024-04-23T09:06:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2024-04-23T09:00:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2024-04-23T08:54:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2024-04-23T08:48:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2024-04-23T08:42:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2024-04-23T08:36:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2024-04-23T08:30:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2024-04-23T08:24:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2024-04-23T08:18:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2024-04-23T08:12:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2024-04-23T08:06:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2024-04-23T08:00:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2024-04-23T07:54:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2024-04-23T07:48:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2024-04-23T07:42:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2024-04-23T07:36:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2024-04-23T07:30:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2024-04-23T07:24:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2024-04-23T07:18:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2024-04-23T07:12:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2024-04-23T07:06:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2024-04-23T07:00:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2024-04-23T06:54:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2024-04-23T06:48:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2024-04-23T06:42:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2024-04-23T06:36:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2024-04-23T06:30:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2024-04-23T06:24:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2024-04-23T06:18:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2024-04-23T06:12:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2024-04-23T06:06:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2024-04-23T06:00:00Z" }, { "point": 106, "time_frame": 360, "timestamp": 
"2024-04-23T05:54:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2024-04-23T05:48:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2024-04-23T05:42:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2024-04-23T05:36:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2024-04-23T05:30:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2024-04-23T05:24:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2024-04-23T05:18:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2024-04-23T05:12:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2024-04-23T05:06:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2024-04-23T05:00:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2024-04-23T04:54:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2024-04-23T04:48:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2024-04-23T04:42:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2024-04-23T04:36:00Z" } ] + rm -f payload.json + [[ -n '' ]] + helm delete monitoring release "monitoring" uninstalled + destroy monitoring-24877 + grep -v level=debug + grep -v level=info + grep -v 'Getting tasks for pod' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + tee /tmp/tmp.6t4QFNzt7D/operator.log +++ mktemp + grep -v 'the object has been modified' + sort -u + grep -v 'Getting pods from source' ++ local LAST_OUT=/tmp/tmp.S2naOgs38x +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Hk8zcSSeq ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.S2naOgs38x ++ cat /tmp/tmp.4Hk8zcSSeq ++ rm /tmp/tmp.S2naOgs38x /tmp/tmp.4Hk8zcSSeq ++ return 0 + kubectl_bin logs postgres-operator-b85fdf48b-8jnsw -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.NssVMscqqW ++ mktemp + local LAST_ERR=/tmp/tmp.8ewMjZtw9e + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl logs postgres-operator-b85fdf48b-8jnsw -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.NssVMscqqW + cat /tmp/tmp.8ewMjZtw9e + rm /tmp/tmp.NssVMscqqW /tmp/tmp.8ewMjZtw9e + return 0 ] { } }, ] } }, { } }, { } ] } } ] } { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ], { } }, }], { } }, }], } ], } }, } }, } }, }, { } }, }, 2024/04/23 16:27:52 INF 1 (localhost:4150) connecting to nsqd 2024/04/23 16:28:53 INF 2 (localhost:4150) connecting to nsqd 2024/04/23 16:28:54 INF 3 (localhost:4150) connecting to nsqd 2024/04/23 16:29:01 INF 4 (localhost:4150) connecting to nsqd 2024/04/23 16:29:28 INF 5 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "some-name-pgbr-repo" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 2022, 
"containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "2", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "defaultMode": 288 "defaultMode": 420, "deployment-name": "some-name" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0423 16:26:23.112156 1 shared_informer.go:240] Waiting for caches to sync for namespace I0423 16:26:23.212489 1 shared_informer.go:247] Caches are synced for namespace I0423 16:26:23.373853 1 shared_informer.go:240] Waiting for caches to sync for monitoring-24877 I0423 16:26:23.474791 1 shared_informer.go:247] Caches are synced for monitoring-24877 "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "limits": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "48Mi" "memory": "512Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", "mountPath": "/tmp", "mountPath": "/tmp" "name": "backrest", "name": "backrest-backup-some-name", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": 
"pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", , {"name":"pmm-client","image":"perconalab/pmm-client:2.41.0","ports":[{"containerPort":7777,"protocol":"TCP"},{"containerPort":30100},{"containerPort":30101},{"containerPort":30102},{"containerPort":30103},{"containerPort":30104},{"containerPort":30105}],"env":[{"name":"PMM_USER","value":"admin"},{"name":"PMM_SERVER","value":"monitoring-service"},{"name":"CLIENT_PORT_LISTEN","value":"7777"},{"name":"CLIENT_PORT_MIN","value":"30100"},{"name":"CLIENT_PORT_MAX","value":"30105"},{"name":"POD_NAME","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.name"}}},{"name":"POD_NAMESPASE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}},{"name":"PMM_AGENT_SERVER_ADDRESS","value":"monitoring-service"},{"name":"PMM_AGENT_SERVER_USERNAME","value":"admin"},{"name":"PMM_AGENT_SERVER_PASSWORD","valueFrom":{"secretKeyRef":{"name":"some-name-pmm-secret","key":"password"}}},{"name":"PMM_AGENT_LISTEN_PORT","value":"7777"},{"name":"PMM_AGENT_PORTS_MIN","value":"30100"},{"name":"PMM_AGENT_PORTS_MAX","value":"30105"},{"name":"PMM_AGENT_CONFIG_FILE","value":"/usr/local/percona/pmm2/config/pmm-agent.yaml"},{"name":"PMM_AGENT_SERVER_INSECURE_TLS","value":"1"},{"name":"PMM_AGENT_LISTEN_ADDRESS","value":"0.0.0.0"},{"name":"PMM_AGENT_SETUP_NODE_NAME","value":"some-name"},{"name":"PMM_AGENT_SETUP_METRICS_MODE","value":"push"},{"name":"PMM_AGENT_SETUP","value":"1"},{"name":"PMM_AGENT_SETUP_FORCE","value":"1"},{"name":"PMM_AGENT_SETUP_NODE_TYPE","value":"container"},{"name":"DB_TYPE","value":"postgresql"},{"name":"PMM_AGENT_SIDECAR","value":"true"},{"name":"PMM_AGENT_SIDECAR_SLEEP","value":"5"},{"name":"DB_PASS","valueFrom":{"secretKeyRef":{"name":"some-name-postgres-secret","key":"password"}}},{"name":"PMM_AGENT_PATHS_TEMPDIR","value":"/tmp"},{"name":"PMM_AGENT_PRERUN_SCRIPT","value":"pmm-admin status --wait=10s;\npmm-admin add postgresql --skip-connection-check --metrics-mode=push --username=postgres --password=$(DB_PASS) --service-name=$(PMM_AGENT_SETUP_NODE_NAME) --host=$(POD_NAME) --port=5432 --query-source=pgstatmonitor;\npmm-admin annotate --service-name=$(PMM_AGENT_SETUP_NODE_NAME) 'Service 
restarted'"}],"resources":{},"livenessProbe":{"httpGet":{"path":"/local/Status","port":7777,"scheme":"HTTP"},"initialDelaySeconds":60,"timeoutSeconds":5,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"lifecycle":{"preStop":{"exec":{"command":["bash","-c","pmm-admin inventory remove node --force $(pmm-admin status --json | python -c \"import sys, json; print(json.load(sys.stdin)['pmm_agent_status']['node_id'])\")"]}}},"imagePullPolicy":"IfNotPresent"}], "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "report", "name": "root-volume" "name": "root-volume", "name": "some-name", "name": "some-name", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo" "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer-cm" "name": "some-name-pgbouncer-secret", "name": "some-name-pgbouncer-secret", "name": "some-name-pgbr-repo", "name": "some-name-pgha-config", "name": "some-name-pgha-config", "name": "some-name-stanza-create", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": "sshd", "name": "sshd", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"some-name"} "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name" "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name" "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 2022, "ports": [{ "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { 
"secretKeyRef": { "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-postgres-secret" "secretName": "some-name-primaryuser-secret" "secretName": "some-name-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "some-name" "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1G" "strategy": { "targetPort": 2022, "template": { time="2024-04-23T16:26:22Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0 time="2024-04-23T16:28:53Z" level=error msg="Error completing workflow some-name-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-23T16:28:53Z" level=error msg="pgtasks.pg.percona.com \"some-name-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "backup" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.55.250.89 --db-path=/pgdata/some-name" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/some-name" "value": "/pgdata/some-name" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "some-name" "value": "some-name-backrest-shared-repo" "value": "some-name-backrest-shared-repo-5ccbf9cc67-lvn76" "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.55.250.89 --db-path=/pgdata/some-name" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + 
kubectl_bin delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.H03ecxmhem ++ mktemp + local LAST_ERR=/tmp/tmp.b3R6resXm0 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.6t4QFNzt7D/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.H03ecxmhem + cat /tmp/tmp.b3R6resXm0 Error from server (NotFound): error when deleting "/tmp/tmp.6t4QFNzt7D/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.6t4QFNzt7D/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.6t4QFNzt7D/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.6t4QFNzt7D/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.6t4QFNzt7D/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.H03ecxmhem /tmp/tmp.b3R6resXm0 + return 1 + true + yq w -d4 /tmp/tmp.6t4QFNzt7D/operator.yaml 'spec.template.spec.containers[0].env[0].value' uninstall + kubectl_bin apply -f /tmp/tmp.6t4QFNzt7D/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YfHCUd0qD8 ++ mktemp + local LAST_ERR=/tmp/tmp.evVblCpcQs + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl apply -f /tmp/tmp.6t4QFNzt7D/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.YfHCUd0qD8 serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.evVblCpcQs + rm /tmp/tmp.YfHCUd0qD8 /tmp/tmp.evVblCpcQs + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-24877 + retry=0 + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
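# [annotation] wait_job_completion, reduced to its core; the trace repeats the
# same name/succeeded checks once per second, and iterations 2 through 19 are
# omitted here as they only increment the counter. The loop allows up to 600
# attempts; in this run .status.succeeded flipped to 1 on attempt 20. NS is
# assumed set to the test namespace.
retry=0
until [[ $(kubectl -n "$NS" get job/pgo-deploy \
        -o 'jsonpath={.status.succeeded}' 2>/dev/null) == 1 ]]; do
    (( ++retry >= 600 )) && { echo "timeout waiting for pgo-deploy"; exit 1; }
    sleep 1
    echo -n .
done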
.+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-24877 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.6t4QFNzt7D/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.pDKvFjwYsl ++ mktemp + local LAST_ERR=/tmp/tmp.7oCNOaMUI5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete -f /tmp/tmp.6t4QFNzt7D/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.pDKvFjwYsl serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.7oCNOaMUI5 + rm /tmp/tmp.pDKvFjwYsl /tmp/tmp.7oCNOaMUI5 + return 0 + kubectl_bin get ns + awk '{print$1}' + xargs kubectl delete ns --grace-period=0 --force=true + egrep -v '^kube-|^default|Terminating|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.acO9mQurnN ++ mktemp + local LAST_ERR=/tmp/tmp.SBDsBp3aH6 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.acO9mQurnN + cat /tmp/tmp.SBDsBp3aH6 + rm /tmp/tmp.acO9mQurnN /tmp/tmp.SBDsBp3aH6 + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
namespace "monitoring-24877" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.eIypm8YJrL ++ mktemp + local LAST_ERR=/tmp/tmp.jjtog2iHIl + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.eIypm8YJrL + cat /tmp/tmp.jjtog2iHIl Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.eIypm8YJrL /tmp/tmp.jjtog2iHIl + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.tBQzxMUYvK ++ mktemp + local LAST_ERR=/tmp/tmp.7VDUZCY4XB + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-721-a67b3475b-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.tBQzxMUYvK + cat /tmp/tmp.7VDUZCY4XB Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.tBQzxMUYvK /tmp/tmp.7VDUZCY4XB + return 1 + true + rm -rf /tmp/tmp.6t4QFNzt7D