++ echo 'Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/monitoring.log' Log: /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/logs/monitoring.log ++ '[' -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: the server doesn't have a resource type "projects" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.14.4 ++ '[' v3 == v2 ']' + create_namespace monitoring-26137 + local namespace=monitoring-26137 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' + cleanup_rbac + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + kubectl_bin get ns + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.IOW8sJUh0Q ++ mktemp + local LAST_OUT=/tmp/tmp.CQs82CazqU + local LAST_ERR=/tmp/tmp.tDjQncfXqH + local exit_status=0 + local timeout=4 + set +o errexit ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Zb6gsGvsMw + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.IOW8sJUh0Q + cat /tmp/tmp.tDjQncfXqH + rm /tmp/tmp.IOW8sJUh0Q /tmp/tmp.tDjQncfXqH + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.CQs82CazqU + cat /tmp/tmp.Zb6gsGvsMw Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.CQs82CazqU /tmp/tmp.Zb6gsGvsMw + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level ++ mktemp + local LAST_OUT=/tmp/tmp.avFgzGSZwD ++ mktemp + local LAST_ERR=/tmp/tmp.TfPLbhvxGS + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 
0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-controller-manager-cluster-level + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.avFgzGSZwD + cat /tmp/tmp.TfPLbhvxGS Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" not found + rm /tmp/tmp.avFgzGSZwD /tmp/tmp.TfPLbhvxGS + return 1 + true + kubectl delete MutatingWebhookConfiguration/chaos-mesh-mutation ValidatingWebhookConfiguration/chaos-mesh-validation ValidatingWebhookConfiguration/validate-auth Error from server (NotFound): mutatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-mutation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "chaos-mesh-validation" not found Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "validate-auth" not found + true + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin delete namespace monitoring-26137 ++ mktemp + local LAST_OUT=/tmp/tmp.eom6XRRNF2 ++ mktemp + local LAST_ERR=/tmp/tmp.YXvhSTXYsQ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace monitoring-26137 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace monitoring-26137 + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete namespace monitoring-26137 + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.eom6XRRNF2 + cat /tmp/tmp.YXvhSTXYsQ Error from server (NotFound): namespaces "monitoring-26137" not found + rm /tmp/tmp.eom6XRRNF2 /tmp/tmp.YXvhSTXYsQ + return 1 + : + wait_for_delete namespace/monitoring-26137 + local res=namespace/monitoring-26137 + set +o xtrace namespace/monitoring-26137 - Error from server (NotFound): namespaces "monitoring-26137" not found + kubectl_bin create namespace monitoring-26137 ++ mktemp + local LAST_OUT=/tmp/tmp.Wjvhuyc3K1 ++ mktemp + local LAST_ERR=/tmp/tmp.RTC6oPmEws + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl create namespace monitoring-26137 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Wjvhuyc3K1 namespace/monitoring-26137 created + cat /tmp/tmp.RTC6oPmEws + rm 
/tmp/tmp.Wjvhuyc3K1 /tmp/tmp.RTC6oPmEws + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.R0cWYhEYzd +++ mktemp ++ local LAST_ERR=/tmp/tmp.K884fKZrOi ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.R0cWYhEYzd ++ cat /tmp/tmp.K884fKZrOi ++ rm /tmp/tmp.R0cWYhEYzd /tmp/tmp.K884fKZrOi ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=monitoring-26137 ++ mktemp + local LAST_OUT=/tmp/tmp.qzrzkrkmdX ++ mktemp + local LAST_ERR=/tmp/tmp.9hnEWzH3rZ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl config set-context gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox --namespace=monitoring-26137 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.qzrzkrkmdX Context "gke_cloud-dev-112233_us-central1-c_jen-pg-723-7ed19e708-2-sandbox" modified. + cat /tmp/tmp.9hnEWzH3rZ + rm /tmp/tmp.qzrzkrkmdX /tmp/tmp.9hnEWzH3rZ + return 0 + deploy_operator + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local action=install + local namespace=monitoring-26137 + local dont_send_telemetry=false + local version_service_uri_override= + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + prepare_operator_yaml '' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml disabled install monitoring-26137 false + local pull_secret_name= + local operator_manifest=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml + local namespace_mode=disabled + local operator_action=install + local namespace=monitoring-26137 + local dont_send_telemetry=false + yq r -d2 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml 'data[values.yaml]' + /usr/bin/sed -e 's#^namespace: .*#namespace: "monitoring-26137"#g' + /usr/bin/sed -e 's#pgo_operator_namespace: .*#pgo_operator_namespace: "monitoring-26137"#g' + /usr/bin/sed -e 's#namespace_mode: .*#namespace_mode: "disabled"#g' + cat /dev/fd/63 - + /usr/bin/sed -e 's#^disable_telemetry: .*#disable_telemetry: "false"#g' ++ cut -d: -f1 ++ echo 'crunchy_debug: "true"' ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ echo perconalab/percona-postgresql-operator:PR-723-7ed19e708 ++ cut -d: -f2 + /usr/bin/sed -e 's#pgo_image_prefix: .*#pgo_image_prefix: "perconalab/percona-postgresql-operator"#g' + /usr/bin/sed -e 's#pgo_image_tag: .*#pgo_image_tag: "PR-723-7ed19e708"#g' + [[ -n '' ]] + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + yq w '-d*' /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/operator.yaml metadata.namespace monitoring-26137 + yq w -d4 - 'spec.template.spec.containers[0].image' perconalab/percona-postgresql-operator:PR-723-7ed19e708-pgo-deployer + yq w -d4 - 'spec.template.spec.containers[0].env[0].value' install + yq w -d3 - 'subjects[0].namespace' monitoring-26137 ++ cat /tmp/tmp.ufGAToIrqQ/operator.ini + yq w -d2 - -d2 'data[values.yaml]' 'crunchy_debug: "true" archive_mode: "true" archive_timeout: "60" 
ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" create_rbac: "true" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_telemetry: "false" namespace: "monitoring-26137" namespace_mode: "disabled" pgo_image_prefix: "perconalab/percona-postgresql-operator" pgo_image_pull_policy: "Always" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" pgo_image_tag: "PR-723-7ed19e708" pgo_operator_namespace: "monitoring-26137"' + [[ -n '' ]] + kubectl_bin apply -f /tmp/tmp.ufGAToIrqQ/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Yfnga7EFEK ++ mktemp + local LAST_ERR=/tmp/tmp.QORE5p2Dlb + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.ufGAToIrqQ/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.Yfnga7EFEK serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.QORE5p2Dlb + rm /tmp/tmp.Yfnga7EFEK /tmp/tmp.QORE5p2Dlb + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-26137 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 16 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 17 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 18 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 19 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 20 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 21 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 22 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 23 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 24 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 25 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 26 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 27 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 28 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 29 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 30 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 31 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 32 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 33 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 34 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.v7BNb3u45S ++ mktemp + local LAST_ERR=/tmp/tmp.4m58ETXrqq + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.v7BNb3u45S serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.4m58ETXrqq + rm /tmp/tmp.v7BNb3u45S /tmp/tmp.4m58ETXrqq + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MLFY0uELA0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KX1DiM598A ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.MLFY0uELA0 ++ cat /tmp/tmp.KX1DiM598A ++ rm /tmp/tmp.MLFY0uELA0 /tmp/tmp.KX1DiM598A ++ return 0 + wait_pod postgres-operator-5978c54595-8jkhs + local pod=postgres-operator-5978c54595-8jkhs + local ns= + set +o xtrace postgres-operator-5978c54595-8jkhstrue + [[ -n '' ]] + deploy_helm monitoring-26137 + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + helm repo remove percona "percona" has been removed from your repositories + helm repo add percona https://percona-charts.storage.googleapis.com/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. 
⎈Happy Helming!⎈ + desc 'create first PGO cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PGO cluster ----------------------------------------------------------------------------------- + cluster=some-name + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + platform=kubernetes + helm del --purge monitoring Error: unknown flag: --purge + : + [[ GKE == \O\P\E\N\S\H\I\F\T ]] + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server --set platform=kubernetes https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Apr 24 19:20:33 2024 NAMESPACE: monitoring-26137 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-26137.svc.cluster.local:443 login: admin password: admin + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.SgTuuNynS9 ++ mktemp + local LAST_ERR=/tmp/tmp.4QeND2Cky1 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.SgTuuNynS9 + cat /tmp/tmp.4QeND2Cky1 error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.SgTuuNynS9 /tmp/tmp.4QeND2Cky1 + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.wr5fDOkINr ++ mktemp + local LAST_ERR=/tmp/tmp.VrIZ7Eowop + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.wr5fDOkINr + cat /tmp/tmp.VrIZ7Eowop + rm /tmp/tmp.wr5fDOkINr /tmp/tmp.VrIZ7Eowop + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZDWnfy3fAR +++ mktemp ++ local LAST_ERR=/tmp/tmp.MNJkQNayvM ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.ZDWnfy3fAR ++ cat /tmp/tmp.MNJkQNayvM ++ rm /tmp/tmp.ZDWnfy3fAR /tmp/tmp.MNJkQNayvM ++ return 0 + ADMIN_PASSWORD=admin + 
kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.2kJozf4Ylr ++ mktemp + local LAST_ERR=/tmp/tmp.kZZiHM0vTJ + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.2kJozf4Ylr logger=settings t=2024-04-24T19:20:59.349431334Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2024-04-24T19:20:59.349667838Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2024-04-24T19:20:59.349685606Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2024-04-24T19:20:59.349693027Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2024-04-24T19:20:59.349701986Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2024-04-24T19:20:59.34970885Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2024-04-24T19:20:59.349715322Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2024-04-24T19:20:59.349721342Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2024-04-24T19:20:59.349727321Z level=info msg="App mode production" logger=sqlstore t=2024-04-24T19:20:59.34985051Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2024-04-24T19:20:59.376460288Z level=info msg="Starting DB migrations" logger=migrator t=2024-04-24T19:20:59.390184982Z level=info msg="migrations completed" performed=0 skipped=452 duration=1.322082ms logger=secrets t=2024-04-24T19:20:59.39344347Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2024-04-24T19:20:59.461550783Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2024-04-24T19:20:59.767774174Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2024-04-24T19:20:59.767869111Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2024-04-24T19:20:59.767907693Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2024-04-24T19:20:59.768015231Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2024-04-24T19:20:59.768039937Z level=warn msg="Permitting unsigned plugin. 
This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2024-04-24T19:20:59.788175848Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.loader t=2024-04-24T19:20:59.788753851Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2024-04-24T19:20:59.788792528Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2024-04-24T19:20:59.809574873Z level=warn msg="Plugin process is running with elevated privileges. This is not recommended" logger=plugin.loader t=2024-04-24T19:20:59.809688508Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2024-04-24T19:20:59.80970392Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2024-04-24T19:20:59.809712506Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2024-04-24T19:20:59.809720814Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2024-04-24T19:20:59.809728659Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2024-04-24T19:20:59.809736313Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2024-04-24T19:20:59.809743837Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2024-04-24T19:20:59.809756166Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2024-04-24T19:20:59.809766971Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2024-04-24T19:20:59.809774562Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource Admin password changed successfully ✔ + cat /tmp/tmp.kZZiHM0vTJ + rm /tmp/tmp.2kJozf4Ylr /tmp/tmp.kZZiHM0vTJ + return 0 + spinup_pgcluster some-name /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/conf/cr.yaml + local cluster=some-name + local config=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/conf/cr.yaml + local backup=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local pgoClientFile=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + local namespace=monitoring-26137 + local root_via_pgbouncer= + desc 'create fresh PG cluster' + set +o xtrace ----------------------------------------------------------------------------------- create fresh PG cluster ----------------------------------------------------------------------------------- + create_user_secrets some-name monitoring-26137 + local cluster=some-name + local namespace=monitoring-26137 + yq r -d0 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml + yq w - metadata.name some-name-users-secret + kubectl_bin -n monitoring-26137 apply -f - + yq w - stringData.some-name some-name_pass ++ mktemp + local LAST_OUT=/tmp/tmp.8dyhdUVtMA ++ mktemp + local LAST_ERR=/tmp/tmp.lZuQegZIIY + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.8dyhdUVtMA secret/some-name-users-secret created + cat /tmp/tmp.lZuQegZIIY + rm /tmp/tmp.8dyhdUVtMA /tmp/tmp.lZuQegZIIY + return 0 
+ yq w - metadata.name some-name-pmm-secret + kubectl_bin -n monitoring-26137 apply -f - + yq r -d1 /mnt/jenkins/workspace/cloud-pg-operator_PR-723/deploy/users-secret.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.W0Ci7EcOQ7 ++ mktemp + local LAST_ERR=/tmp/tmp.airA6F4pJt + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.W0Ci7EcOQ7 secret/some-name-pmm-secret created + cat /tmp/tmp.airA6F4pJt + rm /tmp/tmp.W0Ci7EcOQ7 /tmp/tmp.airA6F4pJt + return 0 + kubectl_bin -n monitoring-26137 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.HAQaqp7O2k ++ mktemp + local LAST_ERR=/tmp/tmp.5qHP9Dk3ho + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/conf/client.yml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.HAQaqp7O2k deployment.apps/pg-client created + cat /tmp/tmp.5qHP9Dk3ho + rm /tmp/tmp.HAQaqp7O2k /tmp/tmp.5qHP9Dk3ho + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/conf/cr.yaml some-name false false false false false false false false + local path=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/conf/cr.yaml + local name=some-name + local backup=false + local restore_from=false + local restore_opts=false + local custom_config=false + local podAntiAffinity=false + local schedule=false + local schedule_type=false + local tls_only=false + local root_via_pgbouncer= + yq w /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/conf/cr.yaml metadata.annotations.current-primary some-name + yq w - metadata.labels.deployment-name some-name + yq w - spec.user some-name + yq w - spec.keepBackups false + yq w - spec.name some-name + yq w - metadata.labels.name some-name + yq w - spec.secretsName some-name-users-secret + yq w - spec.clustername some-name + yq w - metadata.labels.pg-cluster some-name + yq w - spec.backup.image perconalab/percona-postgresql-operator:main-ppg14-pgbackrest + yq w - spec.pmm.image perconalab/pmm-client:2.41.0 + yq w - spec.pgPrimary.image perconalab/percona-postgresql-operator:main-ppg14-postgres-ha + yq w - metadata.labels.crunchy-pgha-scope some-name + yq w - spec.keepData false + yq w - spec.backup.backrestRepoImage perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo + yq w - spec.pgBadger.image perconalab/percona-postgresql-operator:main-ppg14-pgbadger + yq w - spec.pgBouncer.image perconalab/percona-postgresql-operator:main-ppg14-pgbouncer + yq w - spec.database some-name + yq w - metadata.name some-name + case ${podAntiAffinity} in + case ${backup} in + [[ false != \f\a\l\s\e ]] + [[ false != \f\a\l\s\e ]] + [[ false == \t\r\u\e ]] + [[ '' == \t\r\u\e ]] + kubectl_bin apply -f /tmp/tmp.ufGAToIrqQ/cr.yaml -n monitoring-26137 ++ mktemp + local LAST_OUT=/tmp/tmp.OYi2haevuS ++ mktemp + local LAST_ERR=/tmp/tmp.BPenFHatxk + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.ufGAToIrqQ/cr.yaml -n monitoring-26137 + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.OYi2haevuS 
perconapgcluster.pg.percona.com/some-name created + cat /tmp/tmp.BPenFHatxk + rm /tmp/tmp.OYi2haevuS /tmp/tmp.BPenFHatxk + return 0 + wait_deployment some-name-backrest-shared-repo + local name=some-name-backrest-shared-repo + local target_namespace=monitoring-26137 + sleep 10 + set +o xtrace some-name-backrest-shared-repo.. + wait_deployment some-name + local name=some-name + local target_namespace=monitoring-26137 + sleep 10 + set +o xtrace some-name........ + [[ false == \g\c\s\+* ]] + wait_job_completion some-name-stanza-create false + local job=some-name-stanza-create + local mandatory=false + local ns=monitoring-26137 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' some-name-stanza-create+ retry=0 ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/some-name-stanza-create -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_job_completion backrest-backup-some-name + local job=backrest-backup-some-name + local mandatory=true + local ns=monitoring-26137 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' + sleep 1 + echo -n . .+ let retry+=1 + [[ 1 -ge 120 ]] + [[ 1 -ge 120 ]] + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' backrest-backup-some-name+ retry=0 ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 13 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 14 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 15 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/backrest-backup-some-name -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + wait_cluster_consistency some-name + cluster_name=some-name + wait_cluster_status some-name 'pgcluster Initialized' + local cluster_name=some-name + local 'target_status=pgcluster Initialized' + local target_namespace=monitoring-26137 + local status_query= + case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in ++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w7f6bb5vPE +++ mktemp ++ local LAST_ERR=/tmp/tmp.DZjtZ8Rw4w ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.w7f6bb5vPE ++ cat /tmp/tmp.DZjtZ8Rw4w ++ rm /tmp/tmp.w7f6bb5vPE /tmp/tmp.DZjtZ8Rw4w ++ return 0 + status_query='kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o jsonpath='\''{.status.PGCluster.state}'\''' + local retry=0 ++ eval kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Yq0PqnE9wK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C3CjUNzx5m +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Yq0PqnE9wK +++ cat /tmp/tmp.C3CjUNzx5m +++ rm /tmp/tmp.Yq0PqnE9wK /tmp/tmp.C3CjUNzx5m +++ return 0 + [[ pgcluster Processed == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + let retry+=1 + '[' 1 -ge 24 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 10 ++ eval kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath='\''{.status.PGCluster.state}'\''' +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CAaiQgIjgW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Fsg6md4hl4 +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n 
monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.status.PGCluster.state}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.CAaiQgIjgW +++ cat /tmp/tmp.Fsg6md4hl4 +++ rm /tmp/tmp.CAaiQgIjgW /tmp/tmp.Fsg6md4hl4 +++ return 0 + [[ pgcluster Initialized == \p\g\c\l\u\s\t\e\r\ \I\n\i\t\i\a\l\i\z\e\d ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-26137 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jTpSg3nBX7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2yxX5KouMx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.jTpSg3nBX7 +++ cat /tmp/tmp.2yxX5KouMx +++ rm /tmp/tmp.jTpSg3nBX7 /tmp/tmp.2yxX5KouMx +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OGcExYwwtV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BbZ2QdY96I +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.OGcExYwwtV +++ cat /tmp/tmp.BbZ2QdY96I +++ rm /tmp/tmp.OGcExYwwtV /tmp/tmp.BbZ2QdY96I +++ return 0 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.baODFYVHww ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ncl4Zll8VH +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.baODFYVHww +++ cat /tmp/tmp.ncl4Zll8VH +++ rm /tmp/tmp.baODFYVHww /tmp/tmp.ncl4Zll8VH +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xK563rW2UU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qWtESl4GSE ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.xK563rW2UU ++++ cat /tmp/tmp.qWtESl4GSE ++++ rm /tmp/tmp.xK563rW2UU /tmp/tmp.qWtESl4GSE ++++ return 0 +++ kubectl_bin -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.VrrbOsunR2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2lly7QPlrO +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.VrrbOsunR2 +++ cat /tmp/tmp.2lly7QPlrO +++ rm /tmp/tmp.VrrbOsunR2 /tmp/tmp.2lly7QPlrO +++ return 0 ++ echo postgres_password + run_psql 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:postgres_password@some-name.monitoring-26137 + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local uri=postgres:postgres_password@some-name.monitoring-26137 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.go9yXbETcI +++ mktemp ++ local LAST_ERR=/tmp/tmp.a43Y1Bhdl5 ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.go9yXbETcI ++ cat /tmp/tmp.a43Y1Bhdl5 ++ rm /tmp/tmp.go9yXbETcI /tmp/tmp.a43Y1Bhdl5 ++ return 0 + local client_container=pg-client-76df9df876-snvnf + kubectl_bin exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.9fQeC38peT ++ mktemp + local LAST_ERR=/tmp/tmp.GDM4doHZH8 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.9fQeC38peT + cat /tmp/tmp.GDM4doHZH8 + rm /tmp/tmp.9fQeC38peT /tmp/tmp.GDM4doHZH8 + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-26137 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gy7LsPizmC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fvkXRE9dqx +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Gy7LsPizmC +++ cat /tmp/tmp.fvkXRE9dqx +++ rm /tmp/tmp.Gy7LsPizmC /tmp/tmp.fvkXRE9dqx +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 
'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6CLMIq0n5s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9GrlDxUZET +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.6CLMIq0n5s +++ cat /tmp/tmp.9GrlDxUZET +++ rm /tmp/tmp.6CLMIq0n5s /tmp/tmp.9GrlDxUZET +++ return 0 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eOnklVAP4c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1U9dBlrF4g +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.eOnklVAP4c +++ cat /tmp/tmp.1U9dBlrF4g +++ rm /tmp/tmp.eOnklVAP4c /tmp/tmp.1U9dBlrF4g +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EdstwmcJQK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2r2mq9Nbjt ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.EdstwmcJQK ++++ cat /tmp/tmp.2r2mq9Nbjt ++++ rm /tmp/tmp.EdstwmcJQK /tmp/tmp.2r2mq9Nbjt ++++ return 0 +++ kubectl_bin -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DmykM1OwJ2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A32jgNegWM +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.DmykM1OwJ2 +++ cat /tmp/tmp.A32jgNegWM +++ rm /tmp/tmp.DmykM1OwJ2 /tmp/tmp.A32jgNegWM +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:postgres_password@some-name.monitoring-26137 + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' + local uri=postgres:postgres_password@some-name.monitoring-26137 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bX1akRMCSW +++ mktemp ++ local LAST_ERR=/tmp/tmp.BfwE3GytjY ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.bX1akRMCSW ++ cat /tmp/tmp.BfwE3GytjY ++ rm /tmp/tmp.bX1akRMCSW /tmp/tmp.BfwE3GytjY ++ return 0 + local 
client_container=pg-client-76df9df876-snvnf + kubectl_bin exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.y3s2wnMK1H ++ mktemp + local LAST_ERR=/tmp/tmp.ESQBHVDimI + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.y3s2wnMK1H + cat /tmp/tmp.ESQBHVDimI + rm /tmp/tmp.y3s2wnMK1H /tmp/tmp.ESQBHVDimI + return 0 ++ get_psql_user_pass postgres some-name ++ local user=postgres ++ local cluster=some-name ++ local namespace=monitoring-26137 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1uQtXq92Se ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pzbVKNs59y +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.metadata.name}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.1uQtXq92Se +++ cat /tmp/tmp.pzbVKNs59y +++ rm /tmp/tmp.1uQtXq92Se /tmp/tmp.pzbVKNs59y +++ return 0 ++ [[ xsome-name == \x ]] ++ case $(kubectl_bin get perconapgcluster/${cluster_name} -o jsonpath='{.metadata.labels.pgo-version}') in +++ kubectl_bin get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zfBrRg3Qpw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3cN2uFagGK +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get perconapgcluster/some-name -o 'jsonpath={.metadata.labels.pgo-version}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.zfBrRg3Qpw +++ cat /tmp/tmp.3cN2uFagGK +++ rm /tmp/tmp.zfBrRg3Qpw /tmp/tmp.3cN2uFagGK +++ return 0 +++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.djgymaNgR5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.54v0LWtBhp +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.djgymaNgR5 +++ cat /tmp/tmp.54v0LWtBhp +++ rm /tmp/tmp.djgymaNgR5 /tmp/tmp.54v0LWtBhp +++ return 0 ++ [[ xsome-name-users-secret == \x ]] +++ base64 -d ++++ kubectl_bin -n monitoring-26137 get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OVtBRbNVif +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Jg6ptnT64N ++++ local exit_status=0 ++++ local timeout=4 ++++ set +o errexit +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++++ kubectl -n monitoring-26137 
get perconapgcluster/some-name -o 'jsonpath={.spec.secretsName}' ++++ exit_status=0 ++++ [[ 0 != 0 ]] ++++ break ++++ set -o errexit ++++ cat /tmp/tmp.OVtBRbNVif ++++ cat /tmp/tmp.Jg6ptnT64N ++++ rm /tmp/tmp.OVtBRbNVif /tmp/tmp.Jg6ptnT64N ++++ return 0 +++ kubectl_bin -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Wlg2HECmQj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.L2P42fOhjD +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl -n monitoring-26137 get secret/some-name-users-secret -o 'jsonpath={.data.postgres}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.Wlg2HECmQj +++ cat /tmp/tmp.L2P42fOhjD +++ rm /tmp/tmp.Wlg2HECmQj /tmp/tmp.L2P42fOhjD +++ return 0 ++ echo postgres_password + run_psql '\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' postgres:postgres_password@some-name.monitoring-26137 + local 'command=\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";' + local uri=postgres:postgres_password@some-name.monitoring-26137 + local driver=postgres + local suffix=.svc.cluster.local ++ kubectl_bin -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vRptIQMHmg +++ mktemp ++ local LAST_ERR=/tmp/tmp.QZn2WZ0GhZ ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl -n monitoring-26137 get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.vRptIQMHmg ++ cat /tmp/tmp.QZn2WZ0GhZ ++ rm /tmp/tmp.vRptIQMHmg /tmp/tmp.QZn2WZ0GhZ ++ return 0 + local client_container=pg-client-76df9df876-snvnf + kubectl_bin exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' ++ mktemp + local LAST_OUT=/tmp/tmp.EtkegMssEO ++ mktemp + local LAST_ERR=/tmp/tmp.EEdMXHgRh5 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl exec pg-client-76df9df876-snvnf -n monitoring-26137 -- bash -c 'printf '\''\c myapp \\\ GRANT SELECT,INSERT ON myApp to "some-name";GRANT USAGE ON SCHEMA public TO "some-name";\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local' + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.EtkegMssEO + cat /tmp/tmp.EEdMXHgRh5 + rm /tmp/tmp.EtkegMssEO /tmp/tmp.EEdMXHgRh5 + return 0 + sleep 10 + desc 'compare deployment' + set +o xtrace ----------------------------------------------------------------------------------- compare deployment ----------------------------------------------------------------------------------- + compare_kubectl deployment/some-name + local resource=deployment/some-name + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/compare/deployment_some-name.yml + local new_result=/tmp/tmp.ufGAToIrqQ/deployment_some-name.yml 
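# For reference, a minimal per-statement sketch (not part of the captured trace) of
# what the run_psql calls above effectively do: exec into the pg-client pod and run
# psql against the cluster-local service URI. The pod name and the postgres password
# are the values observed in this particular run; treat them as placeholders for any
# other run.
ns=monitoring-26137
pod=pg-client-76df9df876-snvnf
uri='postgres://postgres:postgres_password@some-name.monitoring-26137.svc.cluster.local'
kubectl exec "$pod" -n "$ns" -- psql -v ON_ERROR_STOP=1 -t -q "$uri" \
  -c 'CREATE DATABASE myapp;'
kubectl exec "$pod" -n "$ns" -- psql -v ON_ERROR_STOP=1 -t -q "$uri/myapp" \
  -c 'CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);'
kubectl exec "$pod" -n "$ns" -- psql -v ON_ERROR_STOP=1 -t -q "$uri/myapp" \
  -c 'INSERT INTO myApp (id) VALUES (100500);'
kubectl exec "$pod" -n "$ns" -- psql -v ON_ERROR_STOP=1 -t -q "$uri/myapp" \
  -c 'GRANT SELECT,INSERT ON myApp TO "some-name"; GRANT USAGE ON SCHEMA public TO "some-name";'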
+ [[ GKE == \O\P\E\N\S\H\I\F\T ]] + kubectl_bin get -o yaml deployment/some-name + yq d - '**.creationTimestamp' ++ mktemp + yq d - '**.uid' + local LAST_OUT=/tmp/tmp.oJrG0tqrQi + yq d - '**.namespace' + yq d - metadata.resourceVersion + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - metadata.selfLink ++ mktemp + yq d - metadata.deletionTimestamp + local LAST_ERR=/tmp/tmp.yikpJujPiz + local exit_status=0 + local timeout=4 + set +o errexit + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."batch.kubernetes.io/job-tracking"' + yq d - '**.dataSource' + yq d - '**."batch.kubernetes.io/controller-uid"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - 'spec.template.metadata.labels."batch.kubernetes.io/job-name"' + yq d - '**.volumeName' ++ seq 0 2 + yq d - '**.creationTimestamp' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.image' + yq d - '**.storageClassName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.controller-uid' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - '**.internalTrafficPolicy' + yq d - '**.allocateLoadBalancerNodePorts' + yq d - '**.procMount' + yq d - '**.ipFamilies' + yq d - '**.healthCheckNodePort' + yq d - '**."percona.com/*"' + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.(name==suffix)' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==COMMAND_OPTS)' + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get -o yaml deployment/some-name + yq d - '**.(name==PODNAME)' + yq d - '**.ipFamilyPolicy' + yq d - status + yq d - '**.nodePort' + yq d - '**.enableServiceLinks' + yq d - '**.imagePullSecrets' + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oJrG0tqrQi + cat /tmp/tmp.yikpJujPiz + rm /tmp/tmp.oJrG0tqrQi /tmp/tmp.yikpJujPiz + return 0 + case $(yq r ${new_result} 'kind') in ++ yq r /tmp/tmp.ufGAToIrqQ/deployment_some-name.yml kind + diff -u /mnt/jenkins/workspace/cloud-pg-operator_PR-723/e2e-tests/monitoring/compare/deployment_some-name.yml /tmp/tmp.ufGAToIrqQ/deployment_some-name.yml + desc 'check metrics' + set +o xtrace ----------------------------------------------------------------------------------- check metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds some-name admin:admin + local metric=node_boot_time_seconds + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1713986576 ++ /usr/bin/date -u +%s + local end=1713986636 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dnhohvVIl1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3uDQEWq1pJ +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ 
break +++ set -o errexit +++ cat /tmp/tmp.dnhohvVIl1 +++ cat /tmp/tmp.3uDQEWq1pJ +++ rm /tmp/tmp.dnhohvVIl1 /tmp/tmp.3uDQEWq1pJ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.STA9oM2WYf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EqhEudCjGX +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.STA9oM2WYf +++ cat /tmp/tmp.EqhEudCjGX +++ rm /tmp/tmp.STA9oM2WYf /tmp/tmp.EqhEudCjGX +++ return 0 ++ local ip=34.28.132.220 ++ '[' -n 34.28.132.220 -a 34.28.132.220 '!=' null ']' ++ echo 34.28.132.220 ++ return + local endpoint=34.28.132.220 + curl -s -k 'https://admin:admin@34.28.132.220/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22some-name%22%7D%29&start=1713986576&end=1713986636&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "1713975645" "1713975645" + desc 'check qan20 metrics' + set +o xtrace ----------------------------------------------------------------------------------- check qan20 metrics ----------------------------------------------------------------------------------- + get_qan20_values some-name admin:admin + local instance=some-name + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2024-04-24T18:53:57 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2024-04-24T19:23:57 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pPAgwvMIMm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ChovNGFWSy +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.pPAgwvMIMm +++ cat /tmp/tmp.ChovNGFWSy +++ rm /tmp/tmp.pPAgwvMIMm /tmp/tmp.ChovNGFWSy +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GJ5n94QyCm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Bir6Lmxplw +++ local exit_status=0 +++ local timeout=4 +++ set +o errexit ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ set -o errexit +++ cat /tmp/tmp.GJ5n94QyCm +++ cat /tmp/tmp.Bir6Lmxplw +++ rm /tmp/tmp.GJ5n94QyCm /tmp/tmp.Bir6Lmxplw +++ return 0 ++ local ip=34.28.132.220 ++ '[' -n 34.28.132.220 -a 34.28.132.220 '!=' null ']' ++ echo 34.28.132.220 ++ return + local endpoint=34.28.132.220 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@34.28.132.220/v0/qan/GetReport + jq '.rows[].sparkline' [ { "time_frame": 
360, "timestamp": "2024-04-24T19:23:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2024-04-24T19:17:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2024-04-24T19:11:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2024-04-24T19:05:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2024-04-24T18:59:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2024-04-24T18:53:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2024-04-24T18:47:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2024-04-24T18:41:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2024-04-24T18:35:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2024-04-24T18:29:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2024-04-24T18:23:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2024-04-24T18:17:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2024-04-24T18:11:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2024-04-24T18:05:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2024-04-24T17:59:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2024-04-24T17:53:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2024-04-24T17:47:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2024-04-24T17:41:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2024-04-24T17:35:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2024-04-24T17:29:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2024-04-24T17:23:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2024-04-24T17:17:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2024-04-24T17:11:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2024-04-24T17:05:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2024-04-24T16:59:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2024-04-24T16:53:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2024-04-24T16:47:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2024-04-24T16:41:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2024-04-24T16:35:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2024-04-24T16:29:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2024-04-24T16:23:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2024-04-24T16:17:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2024-04-24T16:11:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2024-04-24T16:05:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2024-04-24T15:59:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2024-04-24T15:53:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2024-04-24T15:47:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2024-04-24T15:41:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2024-04-24T15:35:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2024-04-24T15:29:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2024-04-24T15:23:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2024-04-24T15:17:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2024-04-24T15:11:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2024-04-24T15:05:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2024-04-24T14:59:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2024-04-24T14:53:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2024-04-24T14:47:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2024-04-24T14:41:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2024-04-24T14:35:00Z" }, { "point": 49, 
"time_frame": 360, "timestamp": "2024-04-24T14:29:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2024-04-24T14:23:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2024-04-24T14:17:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2024-04-24T14:11:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2024-04-24T14:05:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2024-04-24T13:59:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2024-04-24T13:53:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2024-04-24T13:47:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2024-04-24T13:41:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2024-04-24T13:35:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2024-04-24T13:29:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2024-04-24T13:23:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2024-04-24T13:17:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2024-04-24T13:11:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2024-04-24T13:05:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2024-04-24T12:59:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2024-04-24T12:53:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2024-04-24T12:47:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2024-04-24T12:41:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2024-04-24T12:35:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2024-04-24T12:29:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2024-04-24T12:23:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2024-04-24T12:17:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2024-04-24T12:11:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2024-04-24T12:05:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2024-04-24T11:59:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2024-04-24T11:53:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2024-04-24T11:47:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2024-04-24T11:41:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2024-04-24T11:35:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2024-04-24T11:29:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2024-04-24T11:23:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2024-04-24T11:17:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2024-04-24T11:11:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2024-04-24T11:05:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2024-04-24T10:59:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2024-04-24T10:53:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2024-04-24T10:47:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2024-04-24T10:41:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2024-04-24T10:35:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2024-04-24T10:29:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2024-04-24T10:23:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2024-04-24T10:17:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2024-04-24T10:11:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2024-04-24T10:05:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2024-04-24T09:59:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2024-04-24T09:53:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2024-04-24T09:47:00Z" }, { "point": 97, "time_frame": 360, "timestamp": 
"2024-04-24T09:41:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2024-04-24T09:35:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2024-04-24T09:29:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2024-04-24T09:23:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2024-04-24T09:17:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2024-04-24T09:11:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2024-04-24T09:05:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2024-04-24T08:59:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2024-04-24T08:53:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2024-04-24T08:47:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2024-04-24T08:41:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2024-04-24T08:35:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2024-04-24T08:29:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2024-04-24T08:23:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2024-04-24T08:17:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2024-04-24T08:11:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2024-04-24T08:05:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2024-04-24T07:59:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2024-04-24T07:53:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2024-04-24T07:47:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2024-04-24T07:41:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2024-04-24T07:35:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2024-04-24T07:29:00Z" } ] + rm -f payload.json + [[ -n '' ]] + helm delete monitoring release "monitoring" uninstalled + destroy monitoring-26137 + grep -v level=info + grep -v level=debug ++ get_operator_pod ++ kubectl_bin get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v 'Getting pods from source' + grep -v 'Getting tasks for pod' +++ mktemp + grep -v 'the object has been modified' + sort -u + tee /tmp/tmp.ufGAToIrqQ/operator.log + grep -v 'get backup status: Job.batch' ++ local LAST_OUT=/tmp/tmp.4bnz8GmjdV +++ mktemp ++ local LAST_ERR=/tmp/tmp.UuIQgTpoEV ++ local exit_status=0 ++ local timeout=4 ++ set +o errexit +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox ++ kubectl get pods --selector=name=postgres-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ set -o errexit ++ cat /tmp/tmp.4bnz8GmjdV ++ cat /tmp/tmp.UuIQgTpoEV ++ rm /tmp/tmp.4bnz8GmjdV /tmp/tmp.UuIQgTpoEV ++ return 0 + kubectl_bin logs postgres-operator-5978c54595-8jkhs -c operator ++ mktemp + local LAST_OUT=/tmp/tmp.CTiGOX1Vdr ++ mktemp + local LAST_ERR=/tmp/tmp.UR0uEz3nx2 + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl logs postgres-operator-5978c54595-8jkhs -c operator + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.CTiGOX1Vdr + cat /tmp/tmp.UR0uEz3nx2 + rm /tmp/tmp.CTiGOX1Vdr /tmp/tmp.UR0uEz3nx2 + return 0 ] { } }, ] } }, { } }, { } ] } } ] } { } }, }, { ] { } }, ] ] ], { } }, }, { }, { },{ }] }], ] { } }, , ], { } }, }], { } }, }], } ], } }, } }, } }, }, { } }, }, 2024/04/24 19:21:04 INF 1 (localhost:4150) connecting to nsqd 2024/04/24 19:21:48 INF 2 (localhost:4150) connecting to nsqd 2024/04/24 19:21:49 INF 3 
(localhost:4150) connecting to nsqd 2024/04/24 19:21:56 INF 4 (localhost:4150) connecting to nsqd 2024/04/24 19:22:23 INF 5 (localhost:4150) connecting to nsqd "accessModes": [ "affinity": "affinity": { "affinity": { "podAntiAffinity": { "allowPrivilegeEscalation": false, "annotations": {"keep-backups":"false","keep-data":"false"}, "apiVersion": "apps/v1", "apiVersion": "batch/v1", "apiVersion": "v1", "apiVersion": "v1", "apiVersion": "v1", "backrest-command": "backup", "backrest-command": "stanza-create", "claimName": "some-name-pgbr-repo" "command": [ "configMap": { "configMap": { "configMapKeyRef": { "containerName": "database", "containerPort": 2022, "containerPort": 5432, "containerPort": 8009, "containers": [ "containers": [{ "cpu": "1", "cpu": "2", "crunchydata" "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgbouncer": "true", "crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name","deployment-name": "some-name","pgouser": "admin" "defaultMode": 288 "defaultMode": 420, "deployment-name": "some-name" "deployment-name": "some-name","pgouser": "admin","crunchy-pgha-scope": "some-name","pgo-version": "1.6.0","name": "some-name","pg-cluster": "some-name" "divisor": "1m", "dnsPolicy": "ClusterFirst" "downwardAPI": { "emptyDir": { "emptyDir": { "emptyDir": { "emptyDir": { "env": [ "env": [{ "exec": { "fieldPath": "metadata.annotations" "fieldPath": "metadata.labels" "fieldPath": "metadata.labels['crunchy-pgha-scope']" "fieldPath": "metadata.namespace" "fieldRef": { "fieldRef": { "fsGroup": 2, I0424 19:20:00.127150 1 shared_informer.go:240] Waiting for caches to sync for namespace I0424 19:20:00.227679 1 shared_informer.go:247] Caches are synced for namespace I0424 19:20:00.329700 1 shared_informer.go:240] Waiting for caches to sync for monitoring-26137 I0424 19:20:00.531402 1 shared_informer.go:247] Caches are synced for monitoring-26137 "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo", "image": "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer", "image": "perconalab/percona-postgresql-operator:main-ppg14-postgres-ha", "imagePullPolicy": "IfNotPresent" "imagePullPolicy": "IfNotPresent", "initialDelaySeconds": 15 "initialDelaySeconds": 30, "items": [ "items": [ "key": "config", "key": "config", "key": "crunchy-pgbouncer", "key": "init" "key": "password" "key": "pg-cluster", "key": "pgo-backrest", "key": "pgo-backrest-job", "key": "pgo-pg-database", "key": "pg-pod-anti-affinity", "key": "vendor", "kind": "Deployment", "kind": "Job", "kind": "PersistentVolumeClaim", "kind": "Service", "labels": { "labels": { "labels": { "labelSelector": { "limits": { "livenessProbe": { "matchExpressions": [ "matchLabels": { "matchLabels": { "maxSurge": 1 "maxUnavailable": 1, "medium": "Memory", "medium": "Memory" "medium": "Memory", "medium": "Memory", "medium": "Memory", "memory": "128Mi" "memory": "48Mi" "memory": "512Mi" "metadata": { "metadata": { "metadata": { "mountPath": "/backrestrepo", "mountPath": "/dev/shm", "mountPath": "/etc/pgbackrest" "mountPath": "/etc/pgbackrest/conf.d", "mountPath": "/etc/podinfo", "mountPath": "/etc/ssh", "mountPath": "/etc/ssh", "mountPath": "/home/pgbackrest" "mountPath": "/pgconf", "mountPath": "/pgconf/", "mountPath": "/pgconf/pgreplicator", "mountPath": "/pgconf/pgsuper", "mountPath": "/pgconf/pguser", "mountPath": "/pgdata", "mountPath": "/sshd", "mountPath": "/sshd", "mountPath": "/tmp", 
"mountPath": "/tmp", "mountPath": "/tmp" "name": "backrest", "name": "backrest-backup-some-name", "name": "backrestrepo", "name": "backrestrepo", "name": "BACKREST_SKIP_CREATE_STANZA", "name": "COMMAND", "name": "COMMAND_OPTS", "name": "database", "name": "dshm" "name": "dshm", "name": "ENABLE_SSHD", "name": "LD_PRELOAD", "name": "LD_PRELOAD", "name": "MODE", "name": "NAMESPACE", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_GROUP", "name": "NSS_WRAPPER_PASSWD", "name": "NSS_WRAPPER_PASSWD", "name": "patroni" "name": "PATRONI_KUBERNETES_LABELS", "name": "PATRONI_KUBERNETES_NAMESPACE", "name": "PATRONI_KUBERNETES_SCOPE_LABEL", "name": "PATRONI_LOG_LEVEL", "name": "PATRONI_POSTGRESQL_DATA_DIR", "name": "PATRONI_SCOPE", "name": "pgbackrest" "name": "pgbackrest-conf", "name": "pgbackrest-conf", "name": "pgbackrest-config" "name": "pgbackrest-config", "name": "PGBACKREST_DB_HOST", "name": "PGBACKREST_DB_PATH", "name": "PGBACKREST_DB_PATH", "name": "pgbackrest-home", "name": "pgbackrest-home", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_LOG_PATH", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_PORT", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_PG1_SOCKET_PATH", "name": "PGBACKREST_REPO1_HOST", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_PATH", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_REPO1_TYPE", "name": "PGBACKREST_STANZA", "name": "PGBACKREST_STANZA", "name": "pgbouncer" "name": "pgbouncer", "name": "pgbouncer-conf", "name": "pgbouncer-conf", "name": "pgconf-volume" "name": "pgconf-volume", "name": "pgdata", "name": "pgdata", "name": "PGHA_DATABASE", "name": "PGHA_INIT", "name": "PGHA_PASSWORD_TYPE", "name": "PGHA_PGBACKREST", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_GCS_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "name": "PGHA_PGBACKREST_S3_VERIFY_TLS", "name": "PGHA_PG_PORT", "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "name": "PGHA_STANDBY", "name": "PGHA_SYNC_REPLICATION", "name": "PGHA_TLS_ENABLED", "name": "PGHA_TLS_ONLY", "name": "PGHA_USER", "name": "PGHOST", "name": "PG_PASSWORD", "name": "PG_PRIMARY_SERVICE_NAME", "name": "PITR_TARGET", , 
{"name":"pmm-client","image":"perconalab/pmm-client:2.41.0","ports":[{"containerPort":7777,"protocol":"TCP"},{"containerPort":30100},{"containerPort":30101},{"containerPort":30102},{"containerPort":30103},{"containerPort":30104},{"containerPort":30105}],"env":[{"name":"PMM_USER","value":"admin"},{"name":"PMM_SERVER","value":"monitoring-service"},{"name":"CLIENT_PORT_LISTEN","value":"7777"},{"name":"CLIENT_PORT_MIN","value":"30100"},{"name":"CLIENT_PORT_MAX","value":"30105"},{"name":"POD_NAME","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.name"}}},{"name":"POD_NAMESPASE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}},{"name":"PMM_AGENT_SERVER_ADDRESS","value":"monitoring-service"},{"name":"PMM_AGENT_SERVER_USERNAME","value":"admin"},{"name":"PMM_AGENT_SERVER_PASSWORD","valueFrom":{"secretKeyRef":{"name":"some-name-pmm-secret","key":"password"}}},{"name":"PMM_AGENT_LISTEN_PORT","value":"7777"},{"name":"PMM_AGENT_PORTS_MIN","value":"30100"},{"name":"PMM_AGENT_PORTS_MAX","value":"30105"},{"name":"PMM_AGENT_CONFIG_FILE","value":"/usr/local/percona/pmm2/config/pmm-agent.yaml"},{"name":"PMM_AGENT_SERVER_INSECURE_TLS","value":"1"},{"name":"PMM_AGENT_LISTEN_ADDRESS","value":"0.0.0.0"},{"name":"PMM_AGENT_SETUP_NODE_NAME","value":"some-name"},{"name":"PMM_AGENT_SETUP_METRICS_MODE","value":"push"},{"name":"PMM_AGENT_SETUP","value":"1"},{"name":"PMM_AGENT_SETUP_FORCE","value":"1"},{"name":"PMM_AGENT_SETUP_NODE_TYPE","value":"container"},{"name":"DB_TYPE","value":"postgresql"},{"name":"PMM_AGENT_SIDECAR","value":"true"},{"name":"PMM_AGENT_SIDECAR_SLEEP","value":"5"},{"name":"DB_PASS","valueFrom":{"secretKeyRef":{"name":"some-name-postgres-secret","key":"password"}}},{"name":"PMM_AGENT_PATHS_TEMPDIR","value":"/tmp"},{"name":"PMM_AGENT_PRERUN_SCRIPT","value":"pmm-admin status --wait=10s;\npmm-admin add postgresql --skip-connection-check --metrics-mode=push --username=postgres --password=$(DB_PASS) --service-name=$(PMM_AGENT_SETUP_NODE_NAME) --host=$(POD_NAME) --port=5432 --query-source=pgstatmonitor;\npmm-admin annotate --service-name=$(PMM_AGENT_SETUP_NODE_NAME) 'Service restarted'"}],"resources":{},"livenessProbe":{"httpGet":{"path":"/local/Status","port":7777,"scheme":"HTTP"},"initialDelaySeconds":60,"timeoutSeconds":5,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"lifecycle":{"preStop":{"exec":{"command":["bash","-c","pmm-admin inventory remove node --force $(pmm-admin status --json | python -c \"import sys, json; print(json.load(sys.stdin)['pmm_agent_status']['node_id'])\")"]}}},"imagePullPolicy":"IfNotPresent"}], "name": "podinfo" "name": "podinfo", "name": "PODNAME", "name": "postgres" "name": "primary-volume" "name": "primary-volume", "name": "report", "name": "root-volume" "name": "root-volume", "name": "some-name", "name": "some-name", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo" "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-backrest-shared-repo", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer", "name": "some-name-pgbouncer-cm" "name": "some-name-pgbouncer-secret", "name": "some-name-pgbouncer-secret", "name": "some-name-pgbr-repo", "name": "some-name-pgha-config", "name": "some-name-pgha-config", "name": "some-name-stanza-create", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "ssh-config", "name": "sshd", "name": "sshd", "name": 
"sshd", "name": "sshd", "name": "tmp" "name": "tmp" "name": "tmp", "name": "tmp", "name": "tmp", "name": "user-volume" "name": "user-volume", "nodePort": 0 "operator": "Exists" "operator": "In", "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" "optional": true "path": "annotations" "path": "cpu_limit", "path": "cpu_request", "path": "labels" "path": "mem_limit", "path": "mem_request", "path": "ssh_config" "path": "ssh_config" "periodSeconds": 15, "persistentVolumeClaim": { "persistentVolumeClaim":{"claimName":"some-name"} "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pg-cluster": "some-name" "pg-cluster": "some-name", "pgo-backrest-job": "true", "pgo-backrest-repo": "true" "pgo-backrest-repo": "true", "pgo-backrest-repo": "true" "pgo-backrest": "true", "pgo-pg-database": "true", "pgo-pg-database": "true", "pg-pod-anti-affinity": "preferred", "pg-pod-anti-affinity": "preferred", "podAffinityTerm": { "podAntiAffinity": { "podAntiAffinity": { "podAntiAffinity": { "port": 2022, "ports": [{ "ports": [{ "preferredDuringSchedulingIgnoredDuringExecution": [ "privileged": false, "projected": { "projected": { "projected": { "sources": [] } "protocol": "TCP", "protocol": "TCP", "readinessProbe": { "readOnly": false "readOnly": false "readOnly": false "readOnlyRootFilesystem": true "readOnly": true "readOnly": true "readOnly": true "ReadWriteOnce" "replicas": 0, "replicas": 1, "requests": { "requests": { "resourceFieldRef": { "resource": "limits.cpu" "resource": "limits.memory" "resource": "requests.cpu" "resource": "requests.memory" "resources": { "resources": { "restartPolicy": "Always", "restartPolicy": "Never" "rollingUpdate": { "runAsNonRoot": true "secret": { "secret": { "secret": { "secret": { "secretKeyRef": { "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-backrest-repo-config" "secretName": "some-name-backrest-repo-config", "secretName": "some-name-postgres-secret" "secretName": "some-name-primaryuser-secret" "secretName": "some-name-some-name-secret" "securityContext": { "securityContext": { "securityContext": {}, "securityContext": {"fsGroup": 26,"supplementalGroups": [1001]}, "securityContext": {"runAsNonRoot":true,"fsGroup":26}, "selector": { "selector": { "serviceAccountName": "pgo-backrest", "serviceAccountName": "pgo-default", "serviceAccountName": "pgo-pg", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-backrest-shared-repo", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "service-name": "some-name-pgbouncer", "sessionAffinity": "None" "sizeLimit": "128Ki" "sizeLimit": "16Mi" "sizeLimit": "1Mi" "sizeLimit": "64Mi" "sizeLimit": "64Mi" "some-name" "sources": [ "sources": [ "spec": { "spec": { "spec": { "storage": "1G" "strategy": { "targetPort": 2022, "template": { time="2024-04-24T19:19:59Z" level=warning msg="PGAdminStorage setting not set, will use PrimaryStorage setting" func="github.com/percona/percona-postgresql-operator/internal/config.(*PgoConfig).Validate()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/config/pgoconfig.go:337" version=1.6.0 time="2024-04-24T19:21:48Z" level=error msg="Error completing workflow some-name-createcluster" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" 
file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:48" version=1.6.0 time="2024-04-24T19:21:48Z" level=error msg="pgtasks.pg.percona.com \"some-name-createcluster\" not found" func="github.com/percona/percona-postgresql-operator/internal/operator/task.completeWorkflow()" file="/go/src/github.com/percona/percona-postgresql-operator/internal/operator/task/workflow.go:49" version=1.6.0 "timeoutSeconds": 10 "topologyKey": "kubernetes.io/hostname" "type": "ClusterIP", "type": "RollingUpdate", "value": "" "value": "5432" "value": "5432" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "/backrestrepo/some-name-backrest-shared-repo" "value": "backup" "value": "crunchy-pgha-scope" "value": "db" "value": "db" "value": " --db-host=10.109.88.79 --db-path=/pgdata/some-name" "value": "false" "value": "false" "valueFrom": { "value": "INFO" "value": "pgbackrest" "value": "pgbackrest-repo" "value": "/pgdata/some-name" "value": "/pgdata/some-name" "value": "posix" "value": "posix" "value": "postgres" "values": [ "value": "some-name" "value": "some-name-backrest-shared-repo" "value": "some-name-backrest-shared-repo-5ccbf9cc67-qj8qz" "value": "stanza-create" "value": "/tmp" "value": "/tmp" "value": "/tmp/nss_wrapper/pgbackrest-repo/group" "value": "/tmp/nss_wrapper/pgbackrest-repo/passwd" "value": "/tmp/nss_wrapper/postgres/group" "value": "/tmp/nss_wrapper/postgres/passwd" "value": "true" "value": "true" "value": "--type=full --db-host=10.109.88.79 --db-path=/pgdata/some-name" "value": "/usr/lib64/libnss_wrapper.so" "value": "/usr/lib64/libnss_wrapper.so" "value": "{vendor: \"crunchydata\"}" "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "vendor": "crunchydata" "vendor": "crunchydata", "volumeMounts": [ "volumeMounts": [{ "volumes": [ "volumes": [{ "weight": 1, + destroy_operator + kubectl_bin delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WgDRtHVhV1 ++ mktemp + local LAST_ERR=/tmp/tmp.bEiapM9Aso + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.ufGAToIrqQ/operator.yaml + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.WgDRtHVhV1 + cat /tmp/tmp.bEiapM9Aso Error from server (NotFound): error when deleting "/tmp/tmp.ufGAToIrqQ/operator.yaml": serviceaccounts "pgo-deployer-sa" not found Error from server (NotFound): error when deleting "/tmp/tmp.ufGAToIrqQ/operator.yaml": clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found Error from server (NotFound): error when deleting "/tmp/tmp.ufGAToIrqQ/operator.yaml": configmaps "pgo-deployer-cm" not found Error from server (NotFound): error when deleting "/tmp/tmp.ufGAToIrqQ/operator.yaml": clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-crb" not found Error from server (NotFound): error when deleting "/tmp/tmp.ufGAToIrqQ/operator.yaml": jobs.batch "pgo-deploy" not found + rm /tmp/tmp.WgDRtHVhV1 /tmp/tmp.bEiapM9Aso + return 1 + true + yq w -d4 /tmp/tmp.ufGAToIrqQ/operator.yaml 'spec.template.spec.containers[0].env[0].value' 
uninstall + kubectl_bin apply -f /tmp/tmp.ufGAToIrqQ/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.oFybpbKRMg ++ mktemp + local LAST_ERR=/tmp/tmp.gvlLwyKZya + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl apply -f /tmp/tmp.ufGAToIrqQ/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.oFybpbKRMg serviceaccount/pgo-deployer-sa created clusterrole.rbac.authorization.k8s.io/pgo-deployer-cr created configmap/pgo-deployer-cm created clusterrolebinding.rbac.authorization.k8s.io/pgo-deployer-crb created job.batch/pgo-deploy created + cat /tmp/tmp.gvlLwyKZya + rm /tmp/tmp.oFybpbKRMg /tmp/tmp.gvlLwyKZya + return 0 + wait_job_completion pgo-deploy true + local job=pgo-deploy + local mandatory=true + local ns=monitoring-26137 + retry=0 + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' pgo-deploy+ retry=0 ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 1 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 2 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 3 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 4 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 5 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 6 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 7 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . 
.+ let retry+=1 + '[' 8 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 9 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 10 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 11 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ '' == \1 ]] + sleep 1 + echo -n . .+ let retry+=1 + '[' 12 -ge 600 ']' ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.metadata.name}' ++ grep -io 'not found' + [[ '' == \n\o\t\ \f\o\u\n\d ]] ++ kubectl_bin -n monitoring-26137 get job/pgo-deploy -o 'jsonpath={.status.succeeded}' + [[ 1 == \1 ]] + echo + kubectl_bin delete -f /tmp/tmp.ufGAToIrqQ/operator-destroy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.62uJtZfkEw ++ mktemp + local LAST_ERR=/tmp/tmp.IzBjJuY6el + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete -f /tmp/tmp.ufGAToIrqQ/operator-destroy.yaml + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.62uJtZfkEw serviceaccount "pgo-deployer-sa" deleted clusterrole.rbac.authorization.k8s.io "pgo-deployer-cr" deleted configmap "pgo-deployer-cm" deleted clusterrolebinding.rbac.authorization.k8s.io "pgo-deployer-crb" deleted job.batch "pgo-deploy" deleted + cat /tmp/tmp.IzBjJuY6el + rm /tmp/tmp.62uJtZfkEw /tmp/tmp.IzBjJuY6el + return 0 + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|openshift|^NAME' + awk '{print$1}' ++ mktemp + xargs kubectl delete ns --grace-period=0 --force=true + local LAST_OUT=/tmp/tmp.QMKbZaUl4e ++ mktemp + local LAST_ERR=/tmp/tmp.xvI5wqUJjx + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + set -o errexit + cat /tmp/tmp.QMKbZaUl4e + cat /tmp/tmp.xvI5wqUJjx + rm /tmp/tmp.QMKbZaUl4e /tmp/tmp.xvI5wqUJjx + return 0 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
namespace "monitoring-26137" force deleted + kubectl_bin delete clusterrolebindings pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.a9JokLkZWA ++ mktemp + local LAST_ERR=/tmp/tmp.0aKmK3XuEs + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterrolebindings pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.a9JokLkZWA + cat /tmp/tmp.0aKmK3XuEs Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.a9JokLkZWA /tmp/tmp.0aKmK3XuEs + return 1 + true + kubectl_bin delete clusterroles pgo-cluster-role pgo-deployer-cr ++ mktemp + local LAST_OUT=/tmp/tmp.HhInZB4Dzs ++ mktemp + local LAST_ERR=/tmp/tmp.wjcPpHlq2m + local exit_status=0 + local timeout=4 + set +o errexit ++ seq 0 2 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 4 + for i in '$(seq 0 2)' + KUBECONFIG=/tmp/jen-pg-723-7ed19e708-2-sandbox + kubectl delete clusterroles pgo-cluster-role pgo-deployer-cr + exit_status=1 + [[ 1 != 0 ]] + sleep 8 + set -o errexit + cat /tmp/tmp.HhInZB4Dzs + cat /tmp/tmp.wjcPpHlq2m Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-cluster-role" not found Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pgo-deployer-cr" not found + rm /tmp/tmp.HhInZB4Dzs /tmp/tmp.wjcPpHlq2m + return 1 + true + rm -rf /tmp/tmp.ufGAToIrqQ