=== RUN kuttl
harness.go:459: starting setup
harness.go:254: running tests using configured kubeconfig.
harness.go:277: Successful connection to cluster at: https://34.45.170.152
harness.go:362: running tests
harness.go:74: going to run test suite with timeout of 180 seconds for each step
harness.go:374: testsuite: e2e-tests/tests has 25 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/monitoring-pmm3
=== PAUSE kuttl/harness/monitoring-pmm3
=== CONT kuttl/harness/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3 | Creating namespace: kuttl-test-deep-minnow
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_client]
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + source ../../functions
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ realpath ../../..
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ pwd
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++ test_name=monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export GIT_BRANCH=PR-1209
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ GIT_BRANCH=PR-1209
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export VERSION=PR-1209-9af81c229
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ VERSION=PR-1209-9af81c229
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export PG_VER=17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ PG_VER=17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export BUCKET=pg-operator-testing
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ BUCKET=pg-operator-testing
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export PGOV1_TAG=1.4.0
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ PGOV1_TAG=1.4.0
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ export PGOV1_VER=14
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ PGOV1_VER=14
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ which gdate
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ which date
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ which gsed
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++++ which sed
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ sed=/usr/bin/sed
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | +++ command -v oc
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | ++ oc get projects
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + init_temp_dir
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + rm -rf /tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + mkdir -p /tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + deploy_operator
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + local cw_prefix=
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + destroy_operator
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | + kubectl -n pg-operator delete deployment percona-postgresql-operator --force --grace-period=0
logger.go:42: 15:02:00 | monitoring-pmm3/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 15:02:01 | monitoring-pmm3/0-deploy-operator | deployment.apps "percona-postgresql-operator" force deleted
logger.go:42: 15:02:01 | monitoring-pmm3/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 15:02:01 | monitoring-pmm3/0-deploy-operator | + kubectl delete namespace pg-operator --force --grace-period=0
logger.go:42: 15:02:01 | monitoring-pmm3/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 15:02:01 | monitoring-pmm3/0-deploy-operator | namespace "pg-operator" force deleted
logger.go:42: 15:02:08 | monitoring-pmm3/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 15:02:08 | monitoring-pmm3/0-deploy-operator | + create_namespace pg-operator
logger.go:42: 15:02:08 | monitoring-pmm3/0-deploy-operator | + local namespace=pg-operator
logger.go:42: 15:02:08 | monitoring-pmm3/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 15:02:08 | monitoring-pmm3/0-deploy-operator | + kubectl delete namespace pg-operator --ignore-not-found
logger.go:42: 15:02:09 | monitoring-pmm3/0-deploy-operator | + kubectl wait --for=delete namespace pg-operator
logger.go:42: 15:02:09 | monitoring-pmm3/0-deploy-operator | + kubectl create namespace pg-operator
logger.go:42: 15:02:10 | monitoring-pmm3/0-deploy-operator | namespace/pg-operator created
logger.go:42: 15:02:10 | monitoring-pmm3/0-deploy-operator | + cw_prefix=cw-
logger.go:42: 15:02:10 | monitoring-pmm3/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy/crd.yaml
logger.go:42: 15:02:11 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/crunchybridgeclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 15:02:11 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgbackups.pgv2.percona.com serverside-applied
logger.go:42: 15:02:13 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgclusters.pgv2.percona.com serverside-applied
logger.go:42: 15:02:13 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgrestores.pgv2.percona.com serverside-applied
logger.go:42: 15:02:14 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgupgrades.pgv2.percona.com serverside-applied
logger.go:42: 15:02:14 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgadmins.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 15:02:14 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgupgrades.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 15:02:16 | monitoring-pmm3/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/postgresclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 15:02:16 | monitoring-pmm3/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy/cw-rbac.yaml
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | serviceaccount/percona-postgresql-operator serverside-applied
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | + local disable_telemetry=true
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | + '[' monitoring-pmm3 == telemetry-transfer ']'
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | + kubectl -n pg-operator apply -f -
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | + yq eval '.spec.template.spec.containers[0].image = "perconalab/percona-postgresql-operator:PR-1209-9af81c229"' /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy/cw-operator.yaml
logger.go:42: 15:02:17 | monitoring-pmm3/0-deploy-operator | + yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' -
logger.go:42: 15:02:19 | monitoring-pmm3/0-deploy-operator | deployment.apps/percona-postgresql-operator created
logger.go:42: 15:02:19 | monitoring-pmm3/0-deploy-operator | + deploy_client
logger.go:42: 15:02:19 | monitoring-pmm3/0-deploy-operator | + kubectl -n kuttl-test-deep-minnow apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf/client.yaml
logger.go:42: 15:02:20 | monitoring-pmm3/0-deploy-operator | deployment.apps/pg-client created
logger.go:42: 15:02:20 | monitoring-pmm3/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:02:20 | monitoring-pmm3/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:02:21 | monitoring-pmm3/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:02:22 | monitoring-pmm3/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:02:22 | monitoring-pmm3/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:02:23 | monitoring-pmm3/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 15:02:23 | monitoring-pmm3/0-deploy-operator | NAME                         NAMESPACE    COL0
logger.go:42: 15:02:23 | monitoring-pmm3/0-deploy-operator | percona-postgresql-operator  pg-operator  1
logger.go:42: 15:02:23 | monitoring-pmm3/0-deploy-operator | ASSERT PASS
logger.go:42: 15:02:23 | monitoring-pmm3/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_pmm3_server]
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + source ../../functions
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ realpath ../../..
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ pwd
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++ test_name=monitoring-pmm3
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-1209
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ GIT_BRANCH=PR-1209
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export VERSION=PR-1209-9af81c229
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ VERSION=PR-1209-9af81c229
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export PG_VER=17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ PG_VER=17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export BUCKET=pg-operator-testing
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ BUCKET=pg-operator-testing
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export PGOV1_TAG=1.4.0
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ PGOV1_TAG=1.4.0
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ export PGOV1_VER=14
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ PGOV1_VER=14
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ which gdate
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ which date
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ date=/usr/bin/date
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ which gsed
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++++ which sed
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ sed=/usr/bin/sed
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | +++ command -v oc
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | ++ oc get projects
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + deploy_pmm3_server
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + helm uninstall -n kuttl-test-deep-minnow pmm
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | Error: uninstall: Release not loaded: pmm: release: not found
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + :
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + [[ -n '' ]]
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + platform=kubernetes
logger.go:42: 15:02:23 | monitoring-pmm3/1-deploy-pmm-server | + helm uninstall -n kuttl-test-deep-minnow monitoring
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | Error: uninstall: Release not loaded: monitoring: release: not found
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | + :
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | + helm repo remove percona
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | "percona" has been removed from your repositories
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | + kubectl delete clusterrole monitoring --ignore-not-found
logger.go:42: 15:02:24 | monitoring-pmm3/1-deploy-pmm-server | + kubectl delete clusterrolebinding monitoring --ignore-not-found
logger.go:42: 15:02:25 | monitoring-pmm3/1-deploy-pmm-server | + helm repo add percona https://percona.github.io/percona-helm-charts/
logger.go:42: 15:02:25 | monitoring-pmm3/1-deploy-pmm-server | "percona" has been added to your repositories
logger.go:42: 15:02:25 | monitoring-pmm3/1-deploy-pmm-server | + helm install monitoring percona/pmm -n kuttl-test-deep-minnow --set fullnameOverride=monitoring --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --set platform=kubernetes --force
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | NAME: monitoring
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | LAST DEPLOYED: Tue Jul 8 15:02:26 2025
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | NAMESPACE: kuttl-test-deep-minnow
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | STATUS: deployed
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | REVISION: 1
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | TEST SUITE: None
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | NOTES:
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | Percona Monitoring and Management (PMM)
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server |
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | An open source database monitoring, observability and management tool
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server |
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | Get the application URL:
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | You can watch the status of by running 'kubectl get --namespace kuttl-test-deep-minnow svc -w monitoring-service'
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | export SERVICE_IP=$(kubectl get svc --namespace kuttl-test-deep-minnow monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | echo https://$SERVICE_IP:
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server |
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | Get password for the "admin" user:
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace kuttl-test-deep-minnow -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
logger.go:42: 15:02:28 | monitoring-pmm3/1-deploy-pmm-server | echo $ADMIN_PASS
logger.go:42: 15:03:12 | monitoring-pmm3/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | starting test step 2-create-pmm-secret
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
kubectl create -n "${NAMESPACE}" secret generic monitoring-pmm3-pmm-secret --from-literal=PMM_SERVER_TOKEN="" || true]
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | + source ../../functions
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ realpath ../../..
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ pwd
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++ test_name=monitoring-pmm3
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export GIT_BRANCH=PR-1209
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ GIT_BRANCH=PR-1209
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export VERSION=PR-1209-9af81c229
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ VERSION=PR-1209-9af81c229
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export PG_VER=17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ PG_VER=17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export BUCKET=pg-operator-testing
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ BUCKET=pg-operator-testing
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export PGOV1_TAG=1.4.0
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ PGOV1_TAG=1.4.0
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ export PGOV1_VER=14
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ PGOV1_VER=14
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ which gdate
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ which date
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ date=/usr/bin/date
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ which gsed
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++++ which sed
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ sed=/usr/bin/sed
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | +++ command -v oc
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | ++ oc get projects
logger.go:42: 15:03:12 | monitoring-pmm3/2-create-pmm-secret | + kubectl create -n kuttl-test-deep-minnow secret generic monitoring-pmm3-pmm-secret --from-literal=PMM_SERVER_TOKEN=
logger.go:42: 15:03:13 | monitoring-pmm3/2-create-pmm-secret | secret/monitoring-pmm3-pmm-secret created
logger.go:42: 15:03:13 | monitoring-pmm3/2-create-pmm-secret | test step completed 2-create-pmm-secret
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | starting test step 3-create-cluster
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.pmm.enabled=true' - \
  | yq eval ".spec.pmm.image=\"${IMAGE_PMM3_CLIENT}\"" - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + source ../../functions
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ realpath ../../..
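Step 2 above deliberately seeds the secret with an empty PMM_SERVER_TOKEN so that step 3 can reference it from the cluster spec before a real token exists; step 4 patches the real value in later. The core of the step is a one-liner (a sketch; NAMESPACE is set by kuttl):

    # '|| true' keeps the step idempotent if the secret already exists.
    kubectl create -n "${NAMESPACE}" secret generic monitoring-pmm3-pmm-secret \
        --from-literal=PMM_SERVER_TOKEN="" || true

In the step-3 pipeline that now starts, the trailing yq override of .spec.pmm.image runs after get_cr, so the pmm-client:3-dev-latest tag from IMAGE_PMM3_CLIENT wins over the pmm-client:dev-latest default that get_cr writes into the CR (both are visible in the trace below).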
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ pwd
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++ test_name=monitoring-pmm3
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export GIT_BRANCH=PR-1209
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ GIT_BRANCH=PR-1209
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export VERSION=PR-1209-9af81c229
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ VERSION=PR-1209-9af81c229
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export PG_VER=17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ PG_VER=17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export BUCKET=pg-operator-testing
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ BUCKET=pg-operator-testing
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export PGOV1_TAG=1.4.0
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ PGOV1_TAG=1.4.0
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ export PGOV1_VER=14
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ PGOV1_VER=14
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ which gdate
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ which date
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ date=/usr/bin/date
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ which gsed
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++++ which sed
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ sed=/usr/bin/sed
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | +++ command -v oc
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ++ oc get projects
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + get_cr
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + local cr_name=
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + '[' -z ']'
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + cr_name=monitoring-pmm3
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + local repo_path=
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + local source_path=
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + yq eval '
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .metadata.name = "monitoring-pmm3" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .metadata.labels = {"e2e":"monitoring-pmm3"} |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.postgresVersion = 17 |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.users += [{"name":"postgres","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.users += [{"name":"monitoring-pmm3","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.image = "perconalab/percona-postgresql-operator:main-ppg17-postgres" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.initContainer.image = "perconalab/percona-postgresql-operator:PR-1209-9af81c229" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-pgbackrest17" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-pgbouncer17" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.pmm.image = "perconalab/pmm-client:dev-latest" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.pmm.secret = "monitoring-pmm3-pmm-secret" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.pmm.customClusterName = "monitoring-pmm3-pmm-custom-name" |
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | .spec.pmm.postgresParams = "--environment=dev-postgres"
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | ' /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy/cr.yaml
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + kubectl -n kuttl-test-deep-minnow apply -f -
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + yq eval .spec.pmm.enabled=true -
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + [[ -n '' ]]
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + case $test_name in
logger.go:42: 15:03:13 | monitoring-pmm3/3-create-cluster | + cat /tmp/kuttl/pg/monitoring-pmm3/cr.yaml
logger.go:42: 15:03:14 | monitoring-pmm3/3-create-cluster | perconapgcluster.pgv2.percona.com/monitoring-pmm3 created
logger.go:42: 15:04:29 | monitoring-pmm3/3-create-cluster | test step completed 3-create-cluster
logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | starting test step 4-update-pmm-server-token
logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
token=$(generate_pmm3_server_token)
[[ -n ${token} && ${token} != null ]] \
  && kubectl -n ${NAMESPACE} patch secret monitoring-pmm3-pmm-secret --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "'${token}'"}}' \
  || true
sleep 25]
logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | + source ../../functions
logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ realpath ../../..
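Combined with the two step-3 overrides, the PMM block of the applied CR ends up as: enabled=true, image=perconalab/pmm-client:3-dev-latest (the last override wins), secret=monitoring-pmm3-pmm-secret, customClusterName=monitoring-pmm3-pmm-custom-name, postgresParams=--environment=dev-postgres. A quick way to confirm that against the live object (a hypothetical one-liner, plain kubectl plus yq; the perconapgcluster resource name is confirmed by the apply output above):

    kubectl -n "${NAMESPACE}" get perconapgcluster monitoring-pmm3 -o yaml \
        | yq eval '.spec.pmm' -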
logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ CERT_MANAGER_VER=1.17.1 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ pwd logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ test_name=monitoring-pmm3 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export GIT_BRANCH=PR-1209 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ GIT_BRANCH=PR-1209 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export VERSION=PR-1209-9af81c229 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ VERSION=PR-1209-9af81c229 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export PG_VER=17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ PG_VER=17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ 
IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export BUCKET=pg-operator-testing logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ BUCKET=pg-operator-testing logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export PGOV1_TAG=1.4.0 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ PGOV1_TAG=1.4.0 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ export PGOV1_VER=14 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ PGOV1_VER=14 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ which gdate logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ which date logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ date=/usr/bin/date logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ which gsed logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:04:29 | 
monitoring-pmm3/4-update-pmm-server-token | ++++ which sed logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ sed=/usr/bin/sed logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ command -v oc logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ oc get projects logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ generate_pmm3_server_token logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ local key_name=8151 logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ local ADMIN_PASSWORD logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ kubectl -n kuttl-test-deep-minnow get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | +++ base64 --decode logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ ADMIN_PASSWORD='rqg;*n#~*t5>6w~ ' logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ [[ -z rqg;*n#~*t5>6w~ ]] logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++ local create_response create_status_code create_json_response logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ get_service_ip monitoring-service logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ local service=monitoring-service logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:04:29 | monitoring-pmm3/4-update-pmm-server-token | ++++ grep -q NotFound logger.go:42: 15:04:30 | monitoring-pmm3/4-update-pmm-server-token | +++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:04:30 | monitoring-pmm3/4-update-pmm-server-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 15:04:30 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 15:04:30 | monitoring-pmm3/4-update-pmm-server-token | ++++ egrep -q 'hostname|ip' logger.go:42: 15:04:31 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 15:04:31 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 15:04:31 | monitoring-pmm3/4-update-pmm-server-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"8151", "role":"Admin", "isDisabled":false}' --user 'admin:rqg;*n#~*t5>6w~ ' https://34.55.125.97/graph/api/serviceaccounts -w '\n%{http_code}' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ create_response='{"id":2,"uid":"aerbmh3l3jg8wf","name":"8151","login":"sa-1-8151","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | 201' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ echo '{"id":2,"uid":"aerbmh3l3jg8wf","name":"8151","login":"sa-1-8151","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ tail -n1 logger.go:42: 
15:04:32 | monitoring-pmm3/4-update-pmm-server-token | 201' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ create_status_code=201 logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ echo '{"id":2,"uid":"aerbmh3l3jg8wf","name":"8151","login":"sa-1-8151","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | 201' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ sed '$ d' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ create_json_response='{"id":2,"uid":"aerbmh3l3jg8wf","name":"8151","login":"sa-1-8151","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ [[ 201 -ne 201 ]] logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ local service_account_id logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ echo '{"id":2,"uid":"aerbmh3l3jg8wf","name":"8151","login":"sa-1-8151","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++ jq -r .id logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ service_account_id=2 logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ [[ -z 2 ]] logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ [[ 2 == \n\u\l\l ]] logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++ local token_response token_status_code token_json_response logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++++ get_service_ip monitoring-service logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++++ local service=monitoring-service logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | ++++ grep -q NotFound logger.go:42: 15:04:32 | monitoring-pmm3/4-update-pmm-server-token | +++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:04:33 | monitoring-pmm3/4-update-pmm-server-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 15:04:33 | monitoring-pmm3/4-update-pmm-server-token | ++++ egrep -q 'hostname|ip' logger.go:42: 15:04:33 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 15:04:33 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 15:04:33 | monitoring-pmm3/4-update-pmm-server-token | ++++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"8151"}' --user 'admin:rqg;*n#~*t5>6w~ ' https://34.55.125.97/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ token_response='{"id":1,"name":"8151","key":"glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"} logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token 
| 200' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | +++ tail -n1 logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | +++ echo '{"id":1,"name":"8151","key":"glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"} logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | 200' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ token_status_code=200 logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | +++ sed '$ d' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | +++ echo '{"id":1,"name":"8151","key":"glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"} logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | 200' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ token_json_response='{"id":1,"name":"8151","key":"glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"}' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ [[ 200 -ne 200 ]] logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ jq -r .key logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | ++ echo '{"id":1,"name":"8151","key":"glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"}' logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | + token=glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | + [[ -n glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e ]] logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | + [[ glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e != null ]] logger.go:42: 15:04:34 | monitoring-pmm3/4-update-pmm-server-token | + kubectl -n kuttl-test-deep-minnow patch secret monitoring-pmm3-pmm-secret --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e"}}' logger.go:42: 15:04:35 | monitoring-pmm3/4-update-pmm-server-token | secret/monitoring-pmm3-pmm-secret patched logger.go:42: 15:04:35 | monitoring-pmm3/4-update-pmm-server-token | + sleep 25 logger.go:42: 15:05:38 | monitoring-pmm3/4-update-pmm-server-token | test step completed 4-update-pmm-server-token logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | starting test step 5-check-qan logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | running command: [sh -c set -o errexit set -o xtrace source ../../functions token=$(kubectl get -n "${NAMESPACE}" secret monitoring-pmm3-pmm-secret --template='{{.data.PMM_SERVER_TOKEN | base64decode}}') instance=$(kubectl get -n "${NAMESPACE}" pod -l postgres-operator.crunchydata.com/instance-set=instance1 -o 'jsonpath={.items[].metadata.name}') get_metric_values node_boot_time_seconds ${NAMESPACE}-${instance} ${token} get_metric_values patroni_postgres_running ${NAMESPACE}-${instance} ${token} get_qan20_values_pmm3 ${NAMESPACE}-${instance} ${token}] logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | + source ../../functions logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ realpath ../../.. 
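For reference, the step-4 flow traced above reduces to three calls: create a Grafana service account on the PMM server, mint a token for it, and hand the token to the operator through the PMM secret. A minimal standalone sketch, assuming the same endpoint discovery and secret name as this run; ENDPOINT, ADMIN_PASSWORD, and the service-account name my-sa are placeholders, and the service-account create call is inferred from the 201 response above rather than shown verbatim in this trace:

    # Hedged sketch of step 4; ENDPOINT, ADMIN_PASSWORD, my-sa are placeholders, not values from this run.
    ENDPOINT=$(kubectl get service/monitoring-service -n "${NAMESPACE}" \
      -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
    # 1. Create an Admin-role service account (the 201 + JSON body seen above).
    sa_id=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
      -d '{"name":"my-sa","role":"Admin"}' --user "admin:${ADMIN_PASSWORD}" \
      "https://${ENDPOINT}/graph/api/serviceaccounts" | jq -r .id)
    # 2. Mint a token for it; .key carries the glsa_... value.
    token=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
      -d '{"name":"my-sa"}' --user "admin:${ADMIN_PASSWORD}" \
      "https://${ENDPOINT}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key)
    # 3. Hand the token to the operator via the cluster's PMM secret.
    kubectl -n "${NAMESPACE}" patch secret monitoring-pmm3-pmm-secret --type merge \
      --patch "{\"stringData\": {\"PMM_SERVER_TOKEN\": \"${token}\"}}"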
logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ CERT_MANAGER_VER=1.17.1 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ pwd logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/tests/monitoring-pmm3 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ test_name=monitoring-pmm3 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/vars.sh logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/deploy logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/e2e-tests/conf logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ TEMP_DIR=/tmp/kuttl/pg/monitoring-pmm3 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export GIT_BRANCH=PR-1209 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ GIT_BRANCH=PR-1209 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export VERSION=PR-1209-9af81c229 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ VERSION=PR-1209-9af81c229 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1209-9af81c229 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export PG_VER=17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ PG_VER=17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export 
IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export BUCKET=pg-operator-testing logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ BUCKET=pg-operator-testing logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export PGOV1_TAG=1.4.0 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ PGOV1_TAG=1.4.0 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ export PGOV1_VER=14 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ PGOV1_VER=14 logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ which gdate logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ which date logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ date=/usr/bin/date logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ which gsed logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1209/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++++ which sed logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ sed=/usr/bin/sed logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | +++ command -v oc logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ oc get projects logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ kubectl get -n kuttl-test-deep-minnow secret monitoring-pmm3-pmm-secret '--template={{.data.PMM_SERVER_TOKEN | base64decode}}' logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | + token=glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:05:38 | monitoring-pmm3/5-check-qan | ++ kubectl get -n kuttl-test-deep-minnow pod -l postgres-operator.crunchydata.com/instance-set=instance1 -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + 
instance=monitoring-pmm3-instance1-m9jb-0 logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + get_metric_values node_boot_time_seconds kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0 glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + local metric=node_boot_time_seconds logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + local instance=kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0 logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + local token=glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + local start=1751986839 logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | + local end=1751987139 logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ get_service_ip monitoring-service logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ local service=monitoring-service logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | ++ grep -q NotFound logger.go:42: 15:05:39 | monitoring-pmm3/5-check-qan | +++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:05:40 | monitoring-pmm3/5-check-qan | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 15:05:40 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 15:05:40 | monitoring-pmm3/5-check-qan | ++ egrep -q 'hostname|ip' logger.go:42: 15:05:40 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 15:05:40 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local endpoint=34.55.125.97 logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local wait_count=20 logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local retry=0 logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986839&end=1751987139&step=60' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + [[ -n "1751985703" ]] logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + get_metric_values patroni_postgres_running kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0 glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local metric=patroni_postgres_running logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local instance=kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0 
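The get_metric_values helper being traced here is a PromQL range query issued through the PMM server's Grafana datasource proxy, authenticated with the service-account token. A rough standalone equivalent, letting curl do the URL-encoding that appears pre-encoded in the trace (metric, node, and ${ENDPOINT} are stand-ins; the helper's min(... or ...) wrapper is kept as-is):

    # Sketch of the traced query; token/ENDPOINT come from the earlier steps.
    metric=node_boot_time_seconds
    node=kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0
    start=$(date -u +%s -d '-5 minute'); end=$(date -u +%s)
    curl -s -k -G -H "Authorization: Bearer ${token}" \
      "https://${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
      --data-urlencode "query=min(${metric}{node_name=~\"${node}\"} or ${metric}{node_name=~\"${node}\"})" \
      --data "start=${start}" --data "end=${end}" --data "step=60" \
      | jq '.data.result[0].values[][1]'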
logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local token=glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local start=1751986841 logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | + local end=1751987141 logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ get_service_ip monitoring-service logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ local service=monitoring-service logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:05:41 | monitoring-pmm3/5-check-qan | ++ grep -q NotFound logger.go:42: 15:05:42 | monitoring-pmm3/5-check-qan | +++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.spec.type}' logger.go:42: 15:05:42 | monitoring-pmm3/5-check-qan | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 15:05:42 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 15:05:42 | monitoring-pmm3/5-check-qan | ++ egrep -q 'hostname|ip' logger.go:42: 15:05:42 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | ++ kubectl get service/monitoring-service -n kuttl-test-deep-minnow -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | + local endpoint=34.55.125.97 logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | + local wait_count=20 logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | + local retry=0 logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986841&end=1751987141&step=60' logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:43 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:44 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:44 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:44 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + local start=1751986846 logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + local end=1751987146 logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + [[ 1 -ge 20 ]] logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 
'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986846&end=1751987146&step=60' logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:46 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | + local start=1751986848 logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | + local end=1751987148 logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | + [[ 2 -ge 20 ]] logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:48 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986848&end=1751987148&step=60' logger.go:42: 15:05:49 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:49 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:49 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + local start=1751986851 logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + local end=1751987151 logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + [[ 3 -ge 20 ]] logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986851&end=1751987151&step=60' logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:51 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | ++ 
/usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + local start=1751986853 logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + local end=1751987153 logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + [[ 4 -ge 20 ]] logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986853&end=1751987153&step=60' logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:53 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | + local start=1751986855 logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | + local end=1751987155 logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | + [[ 5 -ge 20 ]] logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986855&end=1751987155&step=60' logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:55 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:56 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:56 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:56 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + local start=1751986858 logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + local end=1751987158 logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + [[ 6 -ge 20 ]] logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 
'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986858&end=1751987158&step=60' logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:05:58 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | + local start=1751986860 logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | + local end=1751987160 logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | + [[ 7 -ge 20 ]] logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986860&end=1751987160&step=60' logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:00 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:01 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:01 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:01 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + local start=1751986863 logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + local end=1751987163 logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + [[ 8 -ge 20 ]] logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986863&end=1751987163&step=60' logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:03 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | + local start=1751986865 logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | 
++ /usr/bin/date -u +%s logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | + local end=1751987165 logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | + [[ 9 -ge 20 ]] logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:05 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986865&end=1751987165&step=60' logger.go:42: 15:06:06 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:06 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:06 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + local start=1751986868 logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + local end=1751987168 logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + [[ 10 -ge 20 ]] logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986868&end=1751987168&step=60' logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:08 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | + local start=1751986870 logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | + local end=1751987170 logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | + [[ 11 -ge 20 ]] logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986870&end=1751987170&step=60' logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | ++ jq 
'.data.result[0].values[][1]' logger.go:42: 15:06:10 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:11 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:11 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:11 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + local start=1751986873 logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + local end=1751987173 logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + [[ 12 -ge 20 ]] logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986873&end=1751987173&step=60' logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:13 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | + local start=1751986875 logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | + local end=1751987175 logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | + [[ 13 -ge 20 ]] logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986875&end=1751987175&step=60' logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:15 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:16 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:16 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:16 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + local start=1751986878 logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + local end=1751987178 logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + [[ 14 -ge 20 ]] 
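The pattern repeating through these entries is the helper's poll loop: rebuild a fresh 5-minute window, re-run the query, treat an empty jq result (the "Cannot iterate over null" error) as "no samples yet", and give up after wait_count=20 attempts. Pulled out of the trace, the loop is roughly the following sketch, where ${url} stands for the query_range request built above and would be rebuilt with fresh start/end values each pass:

    retry=0
    while true; do
      # grep misses when the result is empty; || true keeps errexit from firing
      value=$(curl -s -k -H "Authorization: Bearer ${token}" "${url}" \
        | jq '.data.result[0].values[][1]' | grep '^"[0-9]' || true)
      if [[ -n ${value} ]]; then break; fi        # at least one numeric sample arrived
      retry=$((retry + 1))
      if [[ ${retry} -ge 20 ]]; then exit 1; fi   # same wait_count=20 cap as the harness
      sleep 2
    done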
logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986878&end=1751987178&step=60' logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:18 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + local start=1751986880 logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + local end=1751987180 logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + [[ 15 -ge 20 ]] logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986880&end=1751987180&step=60' logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:20 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | + local start=1751986882 logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | + local end=1751987182 logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | + [[ 16 -ge 20 ]] logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:22 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986882&end=1751987182&step=60' logger.go:42: 15:06:23 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:23 | monitoring-pmm3/5-check-qan | 
+ [[ -n '' ]] logger.go:42: 15:06:23 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + local start=1751986885 logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + local end=1751987185 logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + [[ 17 -ge 20 ]] logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986885&end=1751987185&step=60' logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:25 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | + local start=1751986887 logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | + local end=1751987187 logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | + [[ 18 -ge 20 ]] logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]' logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]' logger.go:42: 15:06:27 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986887&end=1751987187&step=60' logger.go:42: 15:06:28 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null) logger.go:42: 15:06:28 | monitoring-pmm3/5-check-qan | + [[ -n '' ]] logger.go:42: 15:06:28 | monitoring-pmm3/5-check-qan | + sleep 2 logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute' logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + local start=1751986890 logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + local end=1751987190 logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + let retry+=1 logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + [[ 19 -ge 20 ]] logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | ++ curl -s -k -H 'Authorization: Bearer glsa_zF08KN3MJNGPUyf3cA0e0AmthfwrCpuG_ea01306e' 
'https://34.55.125.97/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7d%20or%20patroni_postgres_running%7Bnode_name%3D%7E%22kuttl-test-deep-minnow-monitoring-pmm3-instance1-m9jb-0%22%7D%29&start=1751986890&end=1751987190&step=60'
logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | ++ grep '^"[0-9]'
logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | ++ jq '.data.result[0].values[][1]'
logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | jq: error (at :0): Cannot iterate over null (null)
logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + [[ -n '' ]]
logger.go:42: 15:06:30 | monitoring-pmm3/5-check-qan | + sleep 2
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s -d '-5 minute'
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | + local start=1751986892
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | ++ /usr/bin/date -u +%s
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | + local end=1751987192
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | + let retry+=1
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | + [[ 20 -ge 20 ]]
logger.go:42: 15:06:32 | monitoring-pmm3/5-check-qan | + exit 1
case.go:396: failed in step 5-check-qan
case.go:398: command "set -o xtrace\n source ../../functions\n token=$(kubectl get -n \"${..." failed, exit status 1
logger.go:42: 15:06:33 | monitoring-pmm3 | monitoring-pmm3 events from ns kuttl-test-deep-minnow:
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:20 +0000 UTC Normal Pod pg-client-84d6c45668-mvz5p Binding Scheduled Successfully assigned kuttl-test-deep-minnow/pg-client-84d6c45668-mvz5p to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:20 +0000 UTC Normal Pod pg-client-84d6c45668-mvz5p.spec.containers{pg-client} Pulling Pulling image "perconalab/percona-distribution-postgresql:16" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:20 +0000 UTC Normal ReplicaSet.apps pg-client-84d6c45668 SuccessfulCreate Created pod: pg-client-84d6c45668-mvz5p replicaset-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:20 +0000 UTC Normal Deployment.apps pg-client ScalingReplicaSet Scaled up replica set pg-client-84d6c45668 to 1 deployment-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:21 +0000 UTC Normal Pod pg-client-84d6c45668-mvz5p.spec.containers{pg-client} Pulled Successfully pulled image "perconalab/percona-distribution-postgresql:16" in 251ms (251ms including waiting). Image size: 324679112 bytes.
kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:21 +0000 UTC Normal Pod pg-client-84d6c45668-mvz5p.spec.containers{pg-client} Created Created container: pg-client kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:21 +0000 UTC Normal Pod pg-client-84d6c45668-mvz5p.spec.containers{pg-client} Started Started container pg-client kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmm-storage-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:28 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-deep-minnow/pmm-storage-monitoring-0" pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:32 +0000 UTC Normal Pod monitoring-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-9hh0 default-scheduler logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:32 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-20129fe9-1f39-49f4-ba10-f16631da3d5a pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:40 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-20129fe9-1f39-49f4-ba10-f16631da3d5a" attachdetach-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:42 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Pulled Container image "perconalab/pmm-server:3-dev-latest" already present on machine kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:42 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Created Created container: pmm kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:42 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Started Started container pmm kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:43 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: Get "http://10.66.98.23:8080/v1/readyz": dial tcp 10.66.98.23:8080: connect: connection refused kubelet 
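This event listing is the harness's post-failure diagnostic dump: after a step fails it prints every event from the test namespace so pod, PVC, and service history can be reconstructed. The same listing can be pulled manually while the namespace still exists, e.g.:

    kubectl get events -n kuttl-test-deep-minnow --sort-by=.lastTimestamp

Readiness-probe warnings like the one above (connection refused, and the 500/503 responses that follow) are consistent with the PMM server still initializing; it was evidently serving by 15:04, when step 4 obtained its token.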
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:46 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 500 kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:02:51 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:11 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:14 +0000 UTC Normal Pod monitoring-pmm3-patroni-version-check Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-patroni-version-check to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:15 +0000 UTC Normal Pod monitoring-pmm3-patroni-version-check.spec.containers{patroni-version-check} Pulled Container image "perconalab/percona-postgresql-operator:main-ppg17-postgres" already present on machine kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:15 +0000 UTC Normal Pod monitoring-pmm3-patroni-version-check.spec.containers{patroni-version-check} Created Created container: patroni-version-check kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:15 +0000 UTC Normal Pod monitoring-pmm3-patroni-version-check.spec.containers{patroni-version-check} Started Started container patroni-version-check kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:20 +0000 UTC Normal Pod monitoring-pmm3-patroni-version-check.spec.containers{patroni-version-check} Killing Stopping container patroni-version-check kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:21 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-m9jb-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:21 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-r55m-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:21 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-r55m-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-deep-minnow/monitoring-pmm3-instance1-r55m-pgdata" pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:21 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-r55m-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:21 +0000 UTC Normal StatefulSet.apps monitoring-pmm3-instance1-r55m SuccessfulCreate create Pod monitoring-pmm3-instance1-r55m-0 in StatefulSet monitoring-pmm3-instance1-r55m successful statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-m9jb-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-deep-minnow/monitoring-pmm3-instance1-m9jb-pgdata" pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-m9jb-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal StatefulSet.apps monitoring-pmm3-instance1-m9jb SuccessfulCreate create Pod monitoring-pmm3-instance1-m9jb-0 in StatefulSet monitoring-pmm3-instance1-m9jb successful statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-nm9d-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-nm9d-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-nm9d-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-deep-minnow/monitoring-pmm3-instance1-nm9d-pgdata" pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:22 +0000 UTC Normal StatefulSet.apps monitoring-pmm3-instance1-nm9d SuccessfulCreate create Pod monitoring-pmm3-instance1-nm9d-0 in StatefulSet monitoring-pmm3-instance1-nm9d successful statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:23 +0000 UTC Normal StatefulSet.apps monitoring-pmm3-repo-host SuccessfulCreate create Pod monitoring-pmm3-repo-host-0 in StatefulSet monitoring-pmm3-repo-host successful statefulset-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:23 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-repo1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:23 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-repo1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:23 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-repo1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-deep-minnow/monitoring-pmm3-repo1" pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777 logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:23 +0000 UTC Normal PostgresCluster.postgres-operator.crunchydata.com monitoring-pmm3 RepoHostCreated created pgBackRest repository host StatefulSet/monitoring-pmm3-repo-host postgrescluster-controller logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-9hh0 default-scheduler logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 251ms (251ms including waiting). Image size: 82600053 bytes. 
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal ReplicaSet.apps monitoring-pmm3-pgbouncer-fd84f68d4 SuccessfulCreate Created pod: monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx replicaset-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal ReplicaSet.apps monitoring-pmm3-pgbouncer-fd84f68d4 SuccessfulCreate Created pod: monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt replicaset-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal ReplicaSet.apps monitoring-pmm3-pgbouncer-fd84f68d4 SuccessfulCreate Created pod: monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm replicaset-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal PodDisruptionBudget.policy monitoring-pmm3-pgbouncer NoPods No matching pods found controllermanager
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:24 +0000 UTC Normal Deployment.apps monitoring-pmm3-pgbouncer ScalingReplicaSet Scaled up replica set monitoring-pmm3-pgbouncer-fd84f68d4 to 3 deployment-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-r55m-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-r55m-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-3c2f902f-3d6b-4e10-8a29-93787d706e5d pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 177ms (177ms including waiting). Image size: 82600053 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 223ms (223ms including waiting). Image size: 82600053 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-lvhdt.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 211ms (211ms including waiting). Image size: 82600053 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:25 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-t4hxx.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-m9jb-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-9hh0 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-m9jb-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-197b5454-9d37-4221-9d83-a6fadb525bf0 pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-nm9d-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-instance1-nm9d-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-1196caf9-4d8e-4613-9f02-a88f5c5d6d35 pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 232ms (232ms including waiting). Image size: 82600053 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:26 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbouncer17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:27 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbouncer17" in 205ms (205ms including waiting). Image size: 82600053 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:27 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:27 +0000 UTC Normal Pod monitoring-pmm3-pgbouncer-fd84f68d4-2v2wm.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:27 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-repo-host-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:27 +0000 UTC Normal PersistentVolumeClaim monitoring-pmm3-repo1 ProvisioningSucceeded Successfully provisioned volume pvc-1c621434-9c63-439c-ba28-3b2c6119e86a pd.csi.storage.gke.io_gke-3e77e01163184a5c81e6-71e9-7ce5-vm_40e90ef0-b7f0-43fe-a211-e7d099800777
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-m9jb-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-nm9d-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-9hh0 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:31 +0000 UTC Normal PodDisruptionBudget.policy monitoring-pmm3-set-instance1 NoPods No matching pods found controllermanager
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:32 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-r55m-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:33 +0000 UTC Warning Pod monitoring-pmm3-instance1-nm9d-0 FailedMount MountVolume.SetUp failed for volume "cert-volume" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:33 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-3c2f902f-3d6b-4e10-8a29-93787d706e5d" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:34 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-197b5454-9d37-4221-9d83-a6fadb525bf0" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:34 +0000 UTC Warning Pod monitoring-pmm3-instance1-m9jb-0 FailedAttachVolume Multi-Attach error for volume "pvc-197b5454-9d37-4221-9d83-a6fadb525bf0" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:34 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1196caf9-4d8e-4613-9f02-a88f5c5d6d35" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:34 +0000 UTC Warning Pod monitoring-pmm3-instance1-nm9d-0 FailedAttachVolume Multi-Attach error for volume "pvc-1196caf9-4d8e-4613-9f02-a88f5c5d6d35" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:35 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1c621434-9c63-439c-ba28-3b2c6119e86a" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 206ms (206ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 216ms (216ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{pgbackrest-log-dir} Created Created container: pgbackrest-log-dir kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{pgbackrest-log-dir} Started Started container pgbackrest-log-dir kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:37 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 223ms (223ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 153ms (153ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:38 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 222ms (222ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 223ms (223ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 162ms (162ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest-config} Created Created container: pgbackrest-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:39 +0000 UTC Normal Pod monitoring-pmm3-repo-host-0.spec.containers{pgbackrest-config} Started Started container pgbackrest-config kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 235ms (235ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 234ms (234ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 218ms (218ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:40 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:41 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:47 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-backup-rhbx-8xsww to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:47 +0000 UTC Normal Job.batch monitoring-pmm3-backup-rhbx SuccessfulCreate Created pod: monitoring-pmm3-backup-rhbx-8xsww job-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:47 +0000 UTC Normal PostgresCluster.postgres-operator.crunchydata.com monitoring-pmm3 StanzasCreated pgBackRest stanza creation completed successfully postgrescluster-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:48 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:48 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 226ms (226ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:48 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:48 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:49 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:49 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-197b5454-9d37-4221-9d83-a6fadb525bf0" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 171ms (171ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-backup-rhbx-8xsww.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 222ms (222ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1196caf9-4d8e-4613-9f02-a88f5c5d6d35" attachdetach-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 239ms (239ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 202ms (202ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 194ms (194ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 239ms (239ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 203ms (203ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 234ms (234ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 266ms (266ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 209ms (209ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 229ms (229ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:54 +0000 UTC Warning PostgresCluster.postgres-operator.crunchydata.com monitoring-pmm3 UnableToCreateStanzas command terminated with exit code 50: repo1-path = /pgbackrest/repo1 2025-07-08 15:03:54.349 P00 ERROR: [050]: raised from remote-0 tls protocol on 'monitoring-pmm3-repo-host-0.monitoring-pmm3-pods.kuttl-test-deep-minnow.svc.cluster.local.': unable to acquire lock on file '/tmp/pgbackrest/db-backup-1.lock': Resource temporarily unavailable HINT: is another pgBackRest process running? 2025-07-08 15:03:54.456 P00 ERROR: [050]: raised from remote-0 tls protocol on 'monitoring-pmm3-repo-host-0.monitoring-pmm3-pods.kuttl-test-deep-minnow.svc.cluster.local.': unable to acquire lock on file '/tmp/pgbackrest/db-backup-1.lock': Resource temporarily unavailable HINT: is another pgBackRest process running? postgrescluster-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 233ms (233ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 215ms (215ms including waiting). Image size: 151983892 bytes. kubelet
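
The UnableToCreateStanzas warning above is pgBackRest exiting with code 50, a lock-acquisition failure: the operator's stanza-create call raced with another pgBackRest process holding /tmp/pgbackrest/db-backup-1.lock on the repo host, most likely the backup job created at 15:03:47. The StanzasCreated event and the backup job completing at 15:04:27 suggest the retry went through on its own. If it had not, one way to inspect the repo host directly, as a sketch (pod and container names taken from the events above; pgbackrest info is a standard pgBackRest command):

  # Ask the repo host for stanza and backup status.
  kubectl -n kuttl-test-deep-minnow exec monitoring-pmm3-repo-host-0 -c pgbackrest -- pgbackrest info

  # Look for stale lock files left behind by a crashed process.
  kubectl -n kuttl-test-deep-minnow exec monitoring-pmm3-repo-host-0 -c pgbackrest -- ls -l /tmp/pgbackrest
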
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:03:55 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:27 +0000 UTC Normal Job.batch monitoring-pmm3-backup-rhbx Completed Job completed job-controller
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:42 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-m9jb-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-nd35 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:49 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:49 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 262ms (262ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:49 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:49 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 225ms (225ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:50 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 228ms (228ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:51 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 225ms (225ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 193ms (193ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pmm-client} Pulled Container image "perconalab/pmm-client:3-dev-latest" already present on machine kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:52 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:53 +0000 UTC Normal Pod monitoring-pmm3-instance1-m9jb-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 205ms (205ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:04:58 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-nm9d-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-9hh0 default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:08 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:08 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 251ms (251ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:08 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:09 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:09 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:10 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 226ms (226ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:10 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:10 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:10 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:11 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 239ms (239ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:11 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:11 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:11 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 240ms (240ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 233ms (233ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pmm-client} Pulled Container image "perconalab/pmm-client:3-dev-latest" already present on machine kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:12 +0000 UTC Normal Pod monitoring-pmm3-instance1-nm9d-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 216ms (216ms including waiting). Image size: 151983892 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:25 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0 Binding Scheduled Successfully assigned kuttl-test-deep-minnow/monitoring-pmm3-instance1-r55m-0 to gke-jen-pg-1209-9af81c22-default-pool-4acb702b-6v8b default-scheduler
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:26 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:27 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 266ms (266ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:27 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:27 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:28 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:28 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-1209-9af81c229" in 227ms (227ms including waiting). Image size: 95750074 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:28 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:28 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:29 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:29 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 239ms (239ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:29 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:29 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 286ms (286ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg17-postgres" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg17-postgres" in 250ms (250ms including waiting). Image size: 527701623 bytes. kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:30 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Created Created container: replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pmm-client} Pulled Container image "perconalab/pmm-client:3-dev-latest" already present on machine kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-pgbackrest17" kubelet
logger.go:42: 15:06:33 | monitoring-pmm3 | 2025-07-08 15:05:31 +0000 UTC Normal Pod monitoring-pmm3-instance1-r55m-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-pgbackrest17" in 213ms (213ms including waiting). Image size: 151983892 bytes. kubelet
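
By this point each instance pod is running its pmm-client sidecar (the image is reported "already present on machine" because it was pre-pulled onto the nodes). A spot-check that the agents actually registered with the PMM server, sketched under the assumption that pmm-admin is on the PATH inside the pmm-client image:

  # Agent status as seen from inside one instance pod's sidecar.
  kubectl -n kuttl-test-deep-minnow exec monitoring-pmm3-instance1-r55m-0 -c pmm-client -- pmm-admin status
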
logger.go:42: 15:06:33 | monitoring-pmm3 | Deleting namespace: kuttl-test-deep-minnow
=== NAME kuttl
    harness.go:403: run tests finished
    harness.go:510: cleaning up
    harness.go:567: removing temp folder: ""
--- FAIL: kuttl (324.60s)
    --- FAIL: kuttl/harness (0.00s)
        --- FAIL: kuttl/harness/monitoring-pmm3 (323.95s)
FAIL
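
The summary shows this single test consumed nearly the whole run (323.95s of 324.60s) before kuttl tore the namespace down. To iterate on just this test rather than the whole suite, kuttl can filter by test name; a sketch, where the suite path is an assumption about this repository's layout and the flags are standard kuttl CLI options:

  # Re-run only the failing test; --test filters by test directory name.
  kubectl kuttl test ./e2e-tests/tests --test monitoring-pmm3 --timeout 180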