=== RUN kuttl
harness.go:459: starting setup
harness.go:254: running tests using configured kubeconfig.
harness.go:277: Successful connection to cluster at: https://34.58.34.71
harness.go:362: running tests
harness.go:74: going to run test suite with timeout of 180 seconds for each step
harness.go:374: testsuite: e2e-tests/tests has 26 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/one-pod
=== PAUSE kuttl/harness/one-pod
=== CONT kuttl/harness/one-pod
logger.go:42: 16:29:33 | one-pod | Creating namespace: kuttl-test-amazed-midge
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_client
deploy_s3_secrets]
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + source ../../functions
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ realpath ../../..
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ pwd
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/tests/one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++ test_name=one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/vars.sh
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/conf
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/conf
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/pg/one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/pg/one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export GIT_BRANCH=PR-1233
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ GIT_BRANCH=PR-1233
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export VERSION=PR-1233-c05fe9b81
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ VERSION=PR-1233-c05fe9b81
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1233-c05fe9b81
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1233-c05fe9b81
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export PG_VER=17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ PG_VER=17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ BUCKET=pg-operator-testing
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ export PGOV1_VER=14
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ PGOV1_VER=14
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ which gdate
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ which date
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ which gsed
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++++ which sed
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ sed=/usr/bin/sed
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | +++ command -v oc
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | ++ oc get projects
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + init_temp_dir
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + rm -rf /tmp/kuttl/pg/one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + mkdir -p /tmp/kuttl/pg/one-pod
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + deploy_operator
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + local cw_prefix=
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + destroy_operator
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + kubectl -n pg-operator delete deployment percona-postgresql-operator --force --grace-period=0
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | deployment.apps "percona-postgresql-operator" force deleted
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | + kubectl delete namespace pg-operator --force --grace-period=0
logger.go:42: 16:29:33 | one-pod/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 16:29:34 | one-pod/0-deploy-operator | namespace "pg-operator" force deleted
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + create_namespace pg-operator
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + local namespace=pg-operator
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + kubectl delete namespace pg-operator --ignore-not-found
logger.go:42: 16:29:41 | one-pod/0-deploy-operator | + kubectl wait --for=delete namespace pg-operator
logger.go:42: 16:29:42 | one-pod/0-deploy-operator | + kubectl create namespace pg-operator
logger.go:42: 16:29:42 | one-pod/0-deploy-operator | namespace/pg-operator created
logger.go:42: 16:29:42 | one-pod/0-deploy-operator | + cw_prefix=cw-
logger.go:42: 16:29:42 | one-pod/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy/crd.yaml
logger.go:42: 16:29:43 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/crunchybridgeclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:29:43 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgbackups.pgv2.percona.com serverside-applied
logger.go:42: 16:29:45 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgclusters.pgv2.percona.com serverside-applied
logger.go:42: 16:29:45 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgrestores.pgv2.percona.com serverside-applied
logger.go:42: 16:29:46 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgupgrades.pgv2.percona.com serverside-applied
logger.go:42: 16:29:46 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgadmins.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:29:46 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgupgrades.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:29:48 | one-pod/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/postgresclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:29:48 | one-pod/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy/cw-rbac.yaml
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | serviceaccount/percona-postgresql-operator serverside-applied
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | + local disable_telemetry=true
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | + '[' one-pod == telemetry-transfer ']'
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | + yq eval '.spec.template.spec.containers[0].image = "perconalab/percona-postgresql-operator:PR-1233-c05fe9b81"' /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy/cw-operator.yaml
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | + kubectl -n pg-operator apply -f -
logger.go:42: 16:29:49 | one-pod/0-deploy-operator | + yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' -
logger.go:42: 16:29:50 | one-pod/0-deploy-operator | deployment.apps/percona-postgresql-operator created
logger.go:42: 16:29:50 | one-pod/0-deploy-operator | + deploy_client
logger.go:42: 16:29:50 | one-pod/0-deploy-operator | + kubectl -n kuttl-test-amazed-midge apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/conf/client.yaml
logger.go:42: 16:29:51 | one-pod/0-deploy-operator | deployment.apps/pg-client created
logger.go:42: 16:29:51 | one-pod/0-deploy-operator | + deploy_s3_secrets
logger.go:42: 16:29:51 | one-pod/0-deploy-operator | + set +o xtrace
logger.go:42: 16:29:52 | one-pod/0-deploy-operator | secret/one-pod-pgbackrest-secrets created
logger.go:42: 16:29:52 | one-pod/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:29:52 | one-pod/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:29:53 | one-pod/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 16:29:54 | one-pod/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:29:54 | one-pod/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:29:55 | one-pod/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 16:29:55 | one-pod/0-deploy-operator | NAME                          NAMESPACE    COL0
logger.go:42: 16:29:55 | one-pod/0-deploy-operator | percona-postgresql-operator  pg-operator  1
logger.go:42: 16:29:55 | one-pod/0-deploy-operator | ASSERT PASS
logger.go:42: 16:29:55 | one-pod/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 16:29:55 | one-pod/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 16:29:55 | one-pod/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '
.spec.proxy.pgBouncer.replicas=1 |
.spec.instances[].replicas=1' - \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + source ../../functions
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ realpath ../../..
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ pwd
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/tests/one-pod
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++ test_name=one-pod
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/vars.sh
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/conf
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/e2e-tests/conf
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/pg/one-pod
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/pg/one-pod
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export GIT_BRANCH=PR-1233
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ GIT_BRANCH=PR-1233
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export VERSION=PR-1233-c05fe9b81
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ VERSION=PR-1233-c05fe9b81
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-1233-c05fe9b81
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE=perconalab/percona-postgresql-operator:PR-1233-c05fe9b81
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export PG_VER=17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ PG_VER=17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ BUCKET=pg-operator-testing
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_PMM3_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ IMAGE_PMM3_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ export PGOV1_VER=14
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ PGOV1_VER=14
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ which gdate
logger.go:42: 16:29:55 | one-pod/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ which date
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ date=/usr/bin/date
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ which gsed
logger.go:42: 16:29:55 | one-pod/1-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-1233/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++++ which sed
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ sed=/usr/bin/sed
logger.go:42: 16:29:55 | one-pod/1-create-cluster | +++ command -v oc
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ++ oc get projects
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + get_cr
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + local cr_name=
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + '[' -z ']'
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + cr_name=one-pod
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + local repo_path=
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + local source_path=
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + yq eval '
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.proxy.pgBouncer.replicas=1 |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.instances[].replicas=1' -
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + yq eval '
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .metadata.name = "one-pod" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .metadata.labels = {"e2e":"one-pod"} |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.postgresVersion = 17 |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.users += [{"name":"postgres","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.users += [{"name":"one-pod","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.image = "perconalab/percona-postgresql-operator:main-ppg17-postgres" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.initContainer.image = "perconalab/percona-postgresql-operator:PR-1233-c05fe9b81" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-pgbackrest17" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-pgbouncer17" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.pmm.image = "perconalab/pmm-client:dev-latest" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.pmm.secret = "one-pod-pmm-secret" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.pmm.customClusterName = "one-pod-pmm-custom-name" |
logger.go:42: 16:29:55 | one-pod/1-create-cluster | .spec.pmm.postgresParams = "--environment=dev-postgres"
logger.go:42: 16:29:55 | one-pod/1-create-cluster | ' /mnt/jenkins/workspace/cloud-pg-operator_PR-1233/deploy/cr.yaml
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + kubectl -n kuttl-test-amazed-midge apply -f -
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + [[ -n '' ]]
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + case $test_name in
logger.go:42: 16:29:55 | one-pod/1-create-cluster | + cat /tmp/kuttl/pg/one-pod/cr.yaml
logger.go:42: 16:29:56 | one-pod/1-create-cluster | perconapgcluster.pgv2.percona.com/one-pod created
logger.go:42: 16:31:56 | one-pod/1-create-cluster | test step failed 1-create-cluster
case.go:396: failed in step 1-create-cluster
case.go:398: no resources matched of kind: apps/v1, Kind=StatefulSet
case.go:398: deployments.apps "one-pod-pgbouncer" not found
case.go:398: no resources matched of kind: batch/v1, Kind=Job
case.go:398: --- PostgresCluster:kuttl-test-amazed-midge/one-pod
+++ PostgresCluster:kuttl-test-amazed-midge/one-pod
@@ -1,8 +1,15 @@
 apiVersion: postgres-operator.crunchydata.com/v1beta1
 kind: PostgresCluster
 metadata:
+  annotations:
+    postgres-operator.crunchydata.com/autoCreateUserSchema: "true"
+    postgres-operator.crunchydata.com/patroni-version: 4.0.5
   finalizers:
   - postgres-operator.crunchydata.com/finalizer
+  labels:
+    e2e: one-pod
+    pgv2.percona.com/version: 2.7.0
+  managedFields: '[... elided field over 10 lines long ...]'
   name: one-pod
   namespace: kuttl-test-amazed-midge
   ownerReferences:
@@ -11,19 +18,8 @@
     controller: true
     kind: PerconaPGCluster
     name: one-pod
+    uid: 3363521b-7daf-4f36-831a-5be6a21d2869
+spec: '[... elided field over 10 lines long ...]'
 status:
-  instances:
-  - name: instance1
-    readyReplicas: 1
-    replicas: 1
-    updatedReplicas: 1
   observedGeneration: 1
-  pgbackrest:
-    repos:
-    - name: repo1
-      stanzaCreated: true
-  proxy:
-    pgBouncer:
-      readyReplicas: 1
-      replicas: 1
case.go:398: resource PostgresCluster:kuttl-test-amazed-midge/one-pod: .status.instances: key is missing from map
case.go:398: --- PerconaPGCluster:kuttl-test-amazed-midge/one-pod
+++ PerconaPGCluster:kuttl-test-amazed-midge/one-pod
@@ -1,18 +1,36 @@
 apiVersion: pgv2.percona.com/v2
 kind: PerconaPGCluster
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"pgv2.percona.com/v2","kind":"PerconaPGCluster","metadata":{"annotations":{},"labels":{"e2e":"one-pod"},"name":"one-pod","namespace":"kuttl-test-amazed-midge"},"spec":{"backups":{"pgbackrest":{"image":"perconalab/percona-postgresql-operator:main-pgbackrest17","manual":{"options":["--type=full"],"repoName":"repo1"},"repoHost":{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/data":"pgbackrest"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}}},"repos":[{"name":"repo1","schedules":{"full":"0 0 * * 6"},"volume":{"volumeClaimSpec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}}]}},"crVersion":"2.7.0","image":"perconalab/percona-postgresql-operator:main-ppg17-postgres","imagePullPolicy":"Always","initContainer":{"image":"perconalab/percona-postgresql-operator:PR-1233-c05fe9b81"},"instances":[{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/data":"postgres"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}},"dataVolumeClaimSpec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"name":"instance1","replicas":1}],"pmm":{"customClusterName":"one-pod-pmm-custom-name","enabled":false,"image":"perconalab/pmm-client:dev-latest","postgresParams":"--environment=dev-postgres","secret":"one-pod-pmm-secret","serverHost":"monitoring-service"},"postgresVersion":17,"proxy":{"pgBouncer":{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/role":"pgbouncer"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}},"image":"perconalab/percona-postgresql-operator:main-pgbouncer17","replicas":1}},"users":[{"name":"postgres","password":{"type":"AlphaNumeric"}},{"name":"one-pod","password":{"type":"AlphaNumeric"}}]}}
+  finalizers:
+  - internal.percona.com/stop-watchers
+  labels:
+    e2e: one-pod
+  managedFields: '[... elided field over 10 lines long ...]'
   name: one-pod
   namespace: kuttl-test-amazed-midge
+spec: '[... elided field over 10 lines long ...]'
 status:
+  conditions:
+  - lastTransitionTime: "2025-07-21T16:30:02Z"
+    message: ""
+    reason: PGBackRestRepoHostReady
+    status: "False"
+    type: ReadyForBackup
+  host: one-pod-pgbouncer.kuttl-test-amazed-midge.svc
+  installedCustomExtensions: []
+  observedGeneration: 1
+  patroniVersion: 4.0.5
   pgbouncer:
-    ready: 1
-    size: 1
+    ready: 0
+    size: 0
   postgres:
-    instances:
-    - name: instance1
-      ready: 1
-      size: 1
-    ready: 1
-    size: 1
-  state: ready
+    imageID: docker.io/perconalab/percona-postgresql-operator@sha256:9bc95df4db2d5c130be5e0a14065f034ba56efbb6d2650f38a63a57a0d9738f1
+    instances: []
+    ready: 0
+    size: 0
+    version: 17
+  state: initializing
case.go:398: resource PerconaPGCluster:kuttl-test-amazed-midge/one-pod: .status.pgbouncer.ready: value mismatch, expected: 1 != actual: 0
logger.go:42: 16:31:56 | one-pod | one-pod events from ns kuttl-test-amazed-midge:
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:51 +0000 UTC Normal Deployment.apps pg-client ScalingReplicaSet Scaled up replica set pg-client-84d6c45668 to 1 deployment-controller
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:52 +0000 UTC Normal Pod pg-client-84d6c45668-5x27r Binding Scheduled Successfully assigned kuttl-test-amazed-midge/pg-client-84d6c45668-5x27r to gke-jen-pg-1233-c05fe9b8-default-pool-dcd9c70c-8kd5 default-scheduler
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:52 +0000 UTC Normal Pod pg-client-84d6c45668-5x27r.spec.containers{pg-client} Pulling Pulling image "perconalab/percona-distribution-postgresql:16" kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:52 +0000 UTC Normal ReplicaSet.apps pg-client-84d6c45668 SuccessfulCreate Created pod: pg-client-84d6c45668-5x27r replicaset-controller
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:53 +0000 UTC Normal Pod pg-client-84d6c45668-5x27r.spec.containers{pg-client} Pulled Successfully pulled image "perconalab/percona-distribution-postgresql:16" in 1.295s (1.295s including waiting). Image size: 324679112 bytes. kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:53 +0000 UTC Normal Pod pg-client-84d6c45668-5x27r.spec.containers{pg-client} Created Created container: pg-client kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:53 +0000 UTC Normal Pod pg-client-84d6c45668-5x27r.spec.containers{pg-client} Started Started container pg-client kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:56 +0000 UTC Normal Pod one-pod-patroni-version-check Binding Scheduled Successfully assigned kuttl-test-amazed-midge/one-pod-patroni-version-check to gke-jen-pg-1233-c05fe9b8-default-pool-dcd9c70c-q69s default-scheduler
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:56 +0000 UTC Normal Pod one-pod-patroni-version-check.spec.containers{patroni-version-check} Pulled Container image "perconalab/percona-postgresql-operator:main-ppg17-postgres" already present on machine kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:56 +0000 UTC Normal Pod one-pod-patroni-version-check.spec.containers{patroni-version-check} Created Created container: patroni-version-check kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:29:56 +0000 UTC Normal Pod one-pod-patroni-version-check.spec.containers{patroni-version-check} Started Started container patroni-version-check kubelet
logger.go:42: 16:31:56 | one-pod | 2025-07-21 16:30:02 +0000 UTC Normal Pod one-pod-patroni-version-check.spec.containers{patroni-version-check} Killing Stopping container patroni-version-check kubelet
logger.go:42: 16:31:56 | one-pod | Deleting namespace: kuttl-test-amazed-midge
=== NAME kuttl
harness.go:403: run tests finished
harness.go:510: cleaning up
harness.go:567: removing temp folder: ""
--- FAIL: kuttl (183.23s)
    --- FAIL: kuttl/harness (0.00s)
        --- FAIL: kuttl/harness/one-pod (182.58s)
FAIL
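
For reference, the state that step 1-create-cluster was waiting for can be read back from the expected ("-") sides of the two diffs above. The sketch below is a hypothetical reconstruction of the kuttl assert for this step, assembled only from those expected values; the file name 01-assert.yaml is an assumption based on kuttl naming conventions, and the StatefulSet, one-pod-pgbouncer Deployment, and Job objects that case.go:398 also reports as missing are omitted here.

# Hypothetical e2e-tests/tests/one-pod/01-assert.yaml (reconstructed from the
# expected sides of the diffs in this log; not copied from the repository).
apiVersion: pgv2.percona.com/v2
kind: PerconaPGCluster
metadata:
  name: one-pod
status:
  pgbouncer:
    ready: 1
    size: 1
  postgres:
    instances:
    - name: instance1
      ready: 1
      size: 1
    ready: 1
    size: 1
  state: ready
---
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: one-pod
status:
  instances:
  - name: instance1
    readyReplicas: 1
    replicas: 1
    updatedReplicas: 1
  observedGeneration: 1
  pgbackrest:
    repos:
    - name: repo1
      stanzaCreated: true
  proxy:
    pgBouncer:
      readyReplicas: 1
      replicas: 1

The actual status captured at 16:31:56 shows pgbouncer and postgres still at ready: 0 / size: 0 and state: initializing, so the 180-second step timeout expired before the cluster reached this expected state.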