=== RUN kuttl harness.go:462: starting setup harness.go:252: running tests using configured kubeconfig. harness.go:275: Successful connection to cluster at: https://34.42.246.74 harness.go:360: running tests harness.go:73: going to run test suite with timeout of 180 seconds for each step harness.go:372: testsuite: e2e-tests/tests has 15 tests === RUN kuttl/harness === RUN kuttl/harness/scheduled-backup === PAUSE kuttl/harness/scheduled-backup === CONT kuttl/harness/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup | Creating namespace: kuttl-test-rational-lamprey logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep deploy_operator deploy_client deploy_s3_secrets] logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + source ../../functions logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ realpath ../../.. logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ pwd logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++ test_name=scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export GIT_BRANCH=PR-772 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ GIT_BRANCH=PR-772 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ VERSION=PR-772-bf00908b7 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export 
IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export PG_VER=16 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ PG_VER=16 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export BUCKET=pg-operator-testing logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ BUCKET=pg-operator-testing logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export PGOV1_TAG=1.4.0 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ PGOV1_TAG=1.4.0 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ export PGOV1_VER=14 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ PGOV1_VER=14 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ which gdate logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ which date logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ which gsed logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++++ which sed logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | +++ sed=/usr/bin/sed logger.go:42: 09:49:19 | 
scheduled-backup/0-deploy-operator | +++ command -v oc logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | ++ oc get projects logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + init_temp_dir logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + rm -rf /tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + mkdir -p /tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + deploy_operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + local cw_prefix= logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + destroy_operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + kubectl -n pg-operator delete deployment percona-postgresql-operator --force --grace-period=0 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-postgresql-operator" not found logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + true logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + [[ -n pg-operator ]] logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + kubectl delete namespace pg-operator --force --grace-period=0 logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | Error from server (NotFound): namespaces "pg-operator" not found logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + true logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + [[ -n pg-operator ]] logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + create_namespace pg-operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + local namespace=pg-operator logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + [[ -n '' ]] logger.go:42: 09:49:19 | scheduled-backup/0-deploy-operator | + kubectl delete namespace pg-operator --ignore-not-found logger.go:42: 09:49:20 | scheduled-backup/0-deploy-operator | + kubectl wait --for=delete namespace pg-operator logger.go:42: 09:49:20 | scheduled-backup/0-deploy-operator | + kubectl create namespace pg-operator logger.go:42: 09:49:21 | scheduled-backup/0-deploy-operator | namespace/pg-operator created logger.go:42: 09:49:21 | scheduled-backup/0-deploy-operator | + cw_prefix=cw- logger.go:42: 09:49:21 | scheduled-backup/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy/crd.yaml logger.go:42: 09:49:21 | scheduled-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/crunchybridgeclusters.postgres-operator.crunchydata.com serverside-applied logger.go:42: 09:49:21 | scheduled-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgbackups.pgv2.percona.com serverside-applied logger.go:42: 09:49:23 | scheduled-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgclusters.pgv2.percona.com serverside-applied logger.go:42: 09:49:24 | scheduled-backup/0-deploy-operator | 
customresourcedefinition.apiextensions.k8s.io/perconapgrestores.pgv2.percona.com serverside-applied logger.go:42: 09:49:24 | scheduled-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgadmins.postgres-operator.crunchydata.com serverside-applied logger.go:42: 09:49:25 | scheduled-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/postgresclusters.postgres-operator.crunchydata.com serverside-applied logger.go:42: 09:49:25 | scheduled-backup/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy/cw-rbac.yaml logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | serviceaccount/percona-postgresql-operator serverside-applied logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-postgresql-operator serverside-applied logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | + local disable_telemetry=true logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | + '[' scheduled-backup == telemetry-transfer ']' logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | + yq eval '.spec.template.spec.containers[0].image = "perconalab/percona-postgresql-operator:PR-772-bf00908b7"' /mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy/cw-operator.yaml logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | + yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' - logger.go:42: 09:49:26 | scheduled-backup/0-deploy-operator | + kubectl -n pg-operator apply -f - logger.go:42: 09:49:27 | scheduled-backup/0-deploy-operator | deployment.apps/percona-postgresql-operator created logger.go:42: 09:49:27 | scheduled-backup/0-deploy-operator | + deploy_client logger.go:42: 09:49:27 | scheduled-backup/0-deploy-operator | + kubectl -n kuttl-test-rational-lamprey apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf/client.yaml logger.go:42: 09:49:28 | scheduled-backup/0-deploy-operator | deployment.apps/pg-client created logger.go:42: 09:49:28 | scheduled-backup/0-deploy-operator | + deploy_s3_secrets logger.go:42: 09:49:28 | scheduled-backup/0-deploy-operator | + set +o xtrace logger.go:42: 09:49:28 | scheduled-backup/0-deploy-operator | secret/scheduled-backup-pgbackrest-secrets created logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | INFO Found 1 resource(s). 
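Stripped of the xtrace noise, the 0-deploy-operator step that just ran (the result table of its readiness check follows below) comes down to the sequence sketched here; the namespace, manifest paths and image tag are the values this particular run used, so treat them as placeholders elsewhere.

  OPERATOR_NS=pg-operator
  DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy

  # CRDs and cluster-wide RBAC are server-side applied so re-runs don't conflict
  kubectl -n "$OPERATOR_NS" apply --server-side --force-conflicts -f "$DEPLOY_DIR/crd.yaml"
  kubectl -n "$OPERATOR_NS" apply --server-side --force-conflicts -f "$DEPLOY_DIR/cw-rbac.yaml"

  # Pin the operator image to this PR build and force DISABLE_TELEMETRY=true before applying
  yq eval '.spec.template.spec.containers[0].image = "perconalab/percona-postgresql-operator:PR-772-bf00908b7"' "$DEPLOY_DIR/cw-operator.yaml" \
    | yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' - \
    | kubectl -n "$OPERATOR_NS" apply -f -

  # kubectl-assert (krew plugin) checks that one replica of the operator is ready
  kubectl assert exist-enhanced deployment percona-postgresql-operator \
    -n "$OPERATOR_NS" --field-selector status.readyReplicas=1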
logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | NAME NAMESPACE COL0 logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | percona-postgresql-operator pg-operator 1 logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | ASSERT PASS logger.go:42: 09:49:30 | scheduled-backup/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | starting test step 1-create-cluster logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr ${RANDOM} | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + source ../../functions logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ realpath ../../.. logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ pwd logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++ test_name=scheduled-backup logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export GIT_BRANCH=PR-772 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ GIT_BRANCH=PR-772 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ VERSION=PR-772-bf00908b7 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 
09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export PG_VER=16 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ PG_VER=16 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export BUCKET=pg-operator-testing logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ BUCKET=pg-operator-testing logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export PGOV1_TAG=1.4.0 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ PGOV1_TAG=1.4.0 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ export PGOV1_VER=14 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ PGOV1_VER=14 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ which gdate logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ which date logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ date=/usr/bin/date logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ which gsed logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++++ which sed logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ sed=/usr/bin/sed logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | +++ command -v oc logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ++ oc get projects logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + get_cr 4159 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + local name_suffix=4159 logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + kubectl 
-n kuttl-test-rational-lamprey apply -f - logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + yq eval ' logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .metadata.name = "scheduled-backup" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .metadata.labels = {"e2e":"scheduled-backup"} | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.postgresVersion = 16 | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.users += [{"name":"postgres","password":{"type":"AlphaNumeric"}}] | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.users += [{"name":"scheduled-backup","password":{"type":"AlphaNumeric"}}] | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.image = "perconalab/percona-postgresql-operator:main-ppg16-postgres" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.pmm.secret = "scheduled-backup-pmm-secret" logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ' /mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy/cr.yaml logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + [[ -n '' ]] logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + case $test_name in logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + yq eval -i ' logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.configuration = [{"secret":{"name":"scheduled-backup-pgbackrest-secrets"}}] | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.manual.repoName = "repo1" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.manual.options = ["--type=full"] | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.global.repo1-path = "/backrestrepo/postgres-operator/scheduled-backup-4159/repo1" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.global.repo2-path = "/backrestrepo/postgres-operator/scheduled-backup-4159/repo2" | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.repos = [{"name":"repo1","s3":{"bucket":"pg-operator-testing","endpoint":"s3.amazonaws.com","region":"us-east-1"}}] | logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | .spec.backups.pgbackrest.repos += [{"name":"repo2","gcs":{"bucket":"pg-operator-testing"}}] logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | ' /tmp/kuttl/pg/scheduled-backup/cr.yaml logger.go:42: 09:49:30 | scheduled-backup/1-create-cluster | + cat /tmp/kuttl/pg/scheduled-backup/cr.yaml logger.go:42: 09:49:31 | scheduled-backup/1-create-cluster | perconapgcluster.pgv2.percona.com/scheduled-backup created logger.go:42: 09:51:24 | scheduled-backup/1-create-cluster | test step completed 1-create-cluster logger.go:42: 09:51:24 | scheduled-backup/2-write-data | starting test step 2-write-data logger.go:42: 09:51:24 | scheduled-backup/2-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_psql_local \ 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS 
myApp (id int PRIMARY KEY);' \ "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)" run_psql_local \ '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' \ "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)"] logger.go:42: 09:51:24 | scheduled-backup/2-write-data | + source ../../functions logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ realpath ../../.. logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ pwd logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ test_name=scheduled-backup logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export GIT_BRANCH=PR-772 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ GIT_BRANCH=PR-772 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ VERSION=PR-772-bf00908b7 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export PG_VER=16 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ PG_VER=16 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer 
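A few entries back, step 1-create-cluster assembled the PerconaPGCluster manifest via get_cr. Trimmed to the parts that matter for a scheduled-backup run, the yq pipeline shown in the trace amounts to the sketch below; the users/PMM fields and the repo1-path/repo2-path keys derived from the random 4159 suffix are left out.

  TEMP_DIR=/tmp/kuttl/pg/scheduled-backup
  NAMESPACE=kuttl-test-rational-lamprey

  # Base CR: name, PostgreSQL 16 and the per-component images for this build
  yq eval '
    .metadata.name = "scheduled-backup" |
    .metadata.labels = {"e2e":"scheduled-backup"} |
    .spec.postgresVersion = 16 |
    .spec.image = "perconalab/percona-postgresql-operator:main-ppg16-postgres" |
    .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" |
    .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer"
  ' /mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy/cr.yaml > "$TEMP_DIR/cr.yaml"

  # scheduled-backup specifics: credentials secret plus two pgBackRest repos, S3 and GCS
  yq eval -i '
    .spec.backups.pgbackrest.configuration = [{"secret":{"name":"scheduled-backup-pgbackrest-secrets"}}] |
    .spec.backups.pgbackrest.repos = [{"name":"repo1","s3":{"bucket":"pg-operator-testing","endpoint":"s3.amazonaws.com","region":"us-east-1"}}] |
    .spec.backups.pgbackrest.repos += [{"name":"repo2","gcs":{"bucket":"pg-operator-testing"}}]
  ' "$TEMP_DIR/cr.yaml"

  kubectl -n "$NAMESPACE" apply -f "$TEMP_DIR/cr.yaml"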
logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export BUCKET=pg-operator-testing logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ BUCKET=pg-operator-testing logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ export PGOV1_VER=14 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ PGOV1_VER=14 logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ which gdate logger.go:42: 09:51:24 | scheduled-backup/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ which date logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ date=/usr/bin/date logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ which gsed logger.go:42: 09:51:24 | scheduled-backup/2-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++++ which sed logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ sed=/usr/bin/sed logger.go:42: 09:51:24 | scheduled-backup/2-write-data | +++ command -v oc logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ oc get projects logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 09:51:24 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 09:51:25 | scheduled-backup/2-write-data | ++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 09:51:25 | scheduled-backup/2-write-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 09:51:25 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get 
secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 09:51:25 | scheduled-backup/2-write-data | + run_psql_local 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:25 | scheduled-backup/2-write-data | + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' logger.go:42: 09:51:25 | scheduled-backup/2-write-data | + local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:25 | scheduled-backup/2-write-data | + local driver=postgres logger.go:42: 09:51:25 | scheduled-backup/2-write-data | ++ get_client_pod logger.go:42: 09:51:25 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:51:25 | scheduled-backup/2-write-data | + kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 09:51:27 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 09:51:28 | scheduled-backup/2-write-data | + run_psql_local '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:28 | scheduled-backup/2-write-data | + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' logger.go:42: 09:51:28 | scheduled-backup/2-write-data | + local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:28 | scheduled-backup/2-write-data | + local driver=postgres logger.go:42: 09:51:28 | scheduled-backup/2-write-data | ++ get_client_pod logger.go:42: 09:51:28 | scheduled-backup/2-write-data | ++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:51:28 | scheduled-backup/2-write-data | + kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 09:51:30 | scheduled-backup/2-write-data | test step completed 2-write-data logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary 
| starting test step 3-read-from-primary logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"] logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | + source ../../functions logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ realpath ../../.. logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++++ pwd logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++ test_name=scheduled-backup logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export GIT_BRANCH=PR-772 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ GIT_BRANCH=PR-772 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ VERSION=PR-772-bf00908b7 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 
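The run_psql_local calls in step 2-write-data above all follow one pattern: read the superuser password and host from the scheduled-backup-pguser-postgres secret, then feed SQL to psql inside the pg-client pod. The sketch below is a simplified equivalent that pipes the statements over kubectl exec -i instead of the helper's printf-inside-bash -c.

  NAMESPACE=kuttl-test-rational-lamprey
  PGPASS=$(kubectl -n "$NAMESPACE" get secret/scheduled-backup-pguser-postgres \
             --template='{{.data.password | base64decode}}')
  PGHOST=$(kubectl -n "$NAMESPACE" get secret/scheduled-backup-pguser-postgres \
             --template='{{.data.host | base64decode}}')
  CLIENT=$(kubectl -n "$NAMESPACE" get pods --selector=name=pg-client \
             -o jsonpath='{.items[].metadata.name}')

  # Same statements the step runs: create the database and table, then insert one row
  printf 'CREATE DATABASE myapp;\n' \
    | kubectl -n "$NAMESPACE" exec -i "$CLIENT" -- psql -v ON_ERROR_STOP=1 -t -q "postgres://postgres:${PGPASS}@${PGHOST}"
  printf 'CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\nINSERT INTO myApp (id) VALUES (100500);\n' \
    | kubectl -n "$NAMESPACE" exec -i "$CLIENT" -- psql -v ON_ERROR_STOP=1 -t -q "postgres://postgres:${PGPASS}@${PGHOST}/myapp"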
logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export PG_VER=16 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ PG_VER=16 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export BUCKET=pg-operator-testing logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ BUCKET=pg-operator-testing logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export PGOV1_TAG=1.4.0 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ PGOV1_TAG=1.4.0 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ export PGOV1_VER=14 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ PGOV1_VER=14 logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++++ which gdate logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++++ which date logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | +++ date=/usr/bin/date logger.go:42: 09:51:30 | scheduled-backup/3-read-from-primary | ++++ which gsed logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++++ which sed logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ sed=/usr/bin/sed logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ command -v oc logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++ oc get projects logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 
09:51:31 | scheduled-backup/3-read-from-primary | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++ local 'command=\c myapp \\\ SELECT * from myApp;' logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | ++ local driver=postgres logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ get_client_pod logger.go:42: 09:51:31 | scheduled-backup/3-read-from-primary | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:51:32 | scheduled-backup/3-read-from-primary | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 09:51:33 | scheduled-backup/3-read-from-primary | + data=' 100500' logger.go:42: 09:51:33 | scheduled-backup/3-read-from-primary | + kubectl create configmap -n kuttl-test-rational-lamprey 03-read-from-primary '--from-literal=data= 100500' logger.go:42: 09:51:34 | scheduled-backup/3-read-from-primary | configmap/03-read-from-primary created logger.go:42: 09:51:35 | scheduled-backup/3-read-from-primary | test step completed 3-read-from-primary logger.go:42: 09:51:35 | scheduled-backup/4-engage-s3 | starting test step 4-engage-s3 logger.go:42: 09:51:36 | scheduled-backup/4-engage-s3 | PerconaPGCluster:kuttl-test-rational-lamprey/scheduled-backup updated logger.go:42: 09:58:50 | scheduled-backup/4-engage-s3 | test step completed 4-engage-s3 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | starting test step 5-wait-second-backup logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | running command: [sh -c set -o errexit set -o xtrace source ../../functions while true; do succeeded_backups=$(kubectl get -n "$NAMESPACE" pg-backup -o yaml | yq '.items | map(select(.status.state == "Succeeded")) | length') if [[ $succeeded_backups -ge 3 ]]; then break else echo "waiting for 3 succeeded pg-backups" sleep 1 fi done] logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | + source ../../functions logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ realpath ../../.. 
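Step 3-read-from-primary (completed at 09:51:35 above) reads the row back over the same path and parks the result in a configmap so the step's assert file, which is not part of this trace, can compare it. Reusing NAMESPACE, CLIENT, PGPASS and PGHOST from the previous sketch:

  # Read the row back through the client pod and record it for the kuttl assert
  data=$(printf '\\c myapp\nSELECT * from myApp;\n' \
           | kubectl -n "$NAMESPACE" exec -i "$CLIENT" -- psql -v ON_ERROR_STOP=1 -t -q "postgres://postgres:${PGPASS}@${PGHOST}")
  kubectl create configmap -n "$NAMESPACE" 03-read-from-primary --from-literal=data="${data}"
  # In this run the captured value was " 100500", matching what step 2 inserted.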
logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ pwd logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ test_name=scheduled-backup logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export GIT_BRANCH=PR-772 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ GIT_BRANCH=PR-772 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ VERSION=PR-772-bf00908b7 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export PG_VER=16 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ PG_VER=16 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres 
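Step 4-engage-s3 (09:51:35 to 09:58:50 above) only shows up in the trace as "PerconaPGCluster ... updated" followed by a roughly seven-minute wait, because its change is applied from the step's own YAML rather than an xtraced script. Since step 5 then waits for more backups to succeed, the update presumably enables pgBackRest schedules on the repos; the sketch below only illustrates what such a change looks like on this CR, with a placeholder cron expression, because the real schedule is not visible in this log.

  # Illustrative only: turn on scheduled full backups for both repos of the CR built in step 1
  yq eval -i '
    .spec.backups.pgbackrest.repos[0].schedules.full = "*/5 * * * *" |
    .spec.backups.pgbackrest.repos[1].schedules.full = "*/5 * * * *"
  ' /tmp/kuttl/pg/scheduled-backup/cr.yaml
  kubectl -n kuttl-test-rational-lamprey apply -f /tmp/kuttl/pg/scheduled-backup/cr.yaml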
logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export BUCKET=pg-operator-testing logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ BUCKET=pg-operator-testing logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export PGOV1_TAG=1.4.0 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ PGOV1_TAG=1.4.0 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ export PGOV1_VER=14 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ PGOV1_VER=14 logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ which gdate logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ which date logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ date=/usr/bin/date logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ which gsed logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++++ which sed logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ sed=/usr/bin/sed logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | +++ command -v oc logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ oc get projects logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 09:58:50 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 09:58:51 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 09:58:51 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 09:58:51 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 09:58:51 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 09:58:51 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 09:58:52 | 
scheduled-backup/5-wait-second-backup | + true
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length'
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]]
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups'
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups
logger.go:42: 09:58:52 | scheduled-backup/5-wait-second-backup | + sleep 1
[... the same poll repeats roughly once per second from 09:58:53 through 10:01:01; succeeded_backups stays at 2, so the step keeps waiting for 3 succeeded pg-backups ...]
logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | + true
logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml
logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length'
logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:01:02 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:02 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:03 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:03 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:03 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:04 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:04 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:04 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:04 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:04 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:05 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:06 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:07 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:07 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:07 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:08 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:08 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:08 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:08 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:08 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:09 | 
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:09 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:10 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:10 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:10 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:11 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:11 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:11 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:11 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:11 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:12 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:13 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:14 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:14 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:14 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:15 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:01:15 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:15 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:15 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:15 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:16 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:17 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:18 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:18 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:18 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:19 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:19 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:19 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:19 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:19 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:20 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:21 | 
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:21 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:21 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:22 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:22 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:22 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:22 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:22 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:23 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:24 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:25 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:25 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:25 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:26 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:26 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:26 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:26 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:26 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:01:27 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:27 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:28 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:29 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:29 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:29 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:30 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:30 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:30 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:30 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:30 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:31 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:31 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:31 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:32 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:32 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:32 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:32 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:32 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:33 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:34 | 
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:34 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:35 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:35 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:35 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:36 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:36 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:36 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:36 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:36 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:37 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:38 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:39 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:39 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:39 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:40 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:01:40 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:40 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:40 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:40 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:41 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:42 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:42 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:42 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:43 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:43 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:43 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:43 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:43 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:44 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:45 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:46 | 
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:46 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:46 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:47 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:47 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:47 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:47 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:47 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:48 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:49 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:50 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:50 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:50 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:51 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:51 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:51 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:51 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:51 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:01:52 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:52 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:53 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:53 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:53 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:54 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:54 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:54 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:54 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:54 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:55 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:56 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:57 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:57 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:57 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:58 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:58 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:58 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:58 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:58 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:01:59 | 
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:01:59 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:00 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:00 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:00 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:01 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:01 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:01 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:01 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:01 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:02 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:03 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:04 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:04 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:04 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:05 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 
10:02:05 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:05 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:05 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:05 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:06 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:07 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:07 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:07 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:08 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:08 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:08 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:08 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:08 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:09 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:10 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:11 | 
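The wait loop in step 5-wait-second-backup is only visible here as xtrace output. Below is a minimal bash sketch of what that loop appears to do, reconstructed from the commands in the trace; the function name and the namespace/threshold arguments are illustrative, not taken from the test sources:

# Poll until at least `want` pg-backup objects report .status.state == "Succeeded".
wait_for_succeeded_backups() {            # hypothetical helper name
    local namespace="$1" want="$2"
    while true; do
        local succeeded_backups
        succeeded_backups=$(kubectl get -n "${namespace}" pg-backup -o yaml \
            | yq '.items | map(select(.status.state == "Succeeded")) | length')
        if [[ ${succeeded_backups} -ge ${want} ]]; then
            break                         # enough scheduled backups have finished
        fi
        echo "waiting for ${want} succeeded pg-backups"
        sleep 1                           # one-second poll interval, as in the trace
    done
}

wait_for_succeeded_backups kuttl-test-rational-lamprey 3

In this run the count stayed at 2 until the poll that completed at 10:02:15 returned 3, at which point the loop hit break.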
scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:11 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:11 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:12 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:12 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:12 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:12 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:12 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | + succeeded_backups=2 logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | + [[ 2 -ge 3 ]] logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | + echo 'waiting for 3 succeeded pg-backups' logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | waiting for 3 succeeded pg-backups logger.go:42: 10:02:13 | scheduled-backup/5-wait-second-backup | + sleep 1 logger.go:42: 10:02:14 | scheduled-backup/5-wait-second-backup | + true logger.go:42: 10:02:14 | scheduled-backup/5-wait-second-backup | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:02:14 | scheduled-backup/5-wait-second-backup | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:02:15 | scheduled-backup/5-wait-second-backup | + succeeded_backups=3 logger.go:42: 10:02:15 | scheduled-backup/5-wait-second-backup | + [[ 3 -ge 3 ]] logger.go:42: 10:02:15 | scheduled-backup/5-wait-second-backup | + break logger.go:42: 10:02:16 | scheduled-backup/5-wait-second-backup | test step completed 5-wait-second-backup logger.go:42: 10:02:16 | scheduled-backup/6-engage-gcs | starting test step 6-engage-gcs logger.go:42: 10:02:17 | scheduled-backup/6-engage-gcs | PerconaPGCluster:kuttl-test-rational-lamprey/scheduled-backup updated logger.go:42: 10:03:23 | scheduled-backup/6-engage-gcs | test step completed 6-engage-gcs logger.go:42: 10:03:23 | scheduled-backup/7-disable-schedule | starting test step 7-disable-schedule logger.go:42: 10:03:24 | scheduled-backup/7-disable-schedule | PerconaPGCluster:kuttl-test-rational-lamprey/scheduled-backup updated logger.go:42: 10:06:25 | scheduled-backup/7-disable-schedule | test step completed 7-disable-schedule logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | starting test step 8-add-more-data logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions PITR_TARGET_TIME=$(run_psql_local \ 'select now();' \ "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl -n ${NAMESPACE} create configmap pitr-target --from-literal=pitr="${PITR_TARGET_TIME/ /}" run_psql_local \ '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' \ "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host 
scheduled-backup-pguser-postgres)" data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl create configmap -n "${NAMESPACE}" 07-add-more-data --from-literal=data="${data}" sleep 30 # wait for wal to get archived] logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | + source ../../functions logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ realpath ../../.. logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ pwd logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++ test_name=scheduled-backup logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ GIT_BRANCH=PR-772 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export PG_VER=16 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ PG_VER=16 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export 
IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export BUCKET=pg-operator-testing logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ BUCKET=pg-operator-testing logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ export PGOV1_VER=14 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ PGOV1_VER=14 logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ which gdate logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ which date logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ date=/usr/bin/date logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ which gsed logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++++ which sed logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ sed=/usr/bin/sed logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ command -v oc logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | ++ oc get projects logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:25 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | 
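The trace around this point shows how step 8-add-more-data builds its connection URI and captures the point-in-time-recovery target: it decodes the password and host from the scheduled-backup-pguser-postgres secret, runs `select now();` through the pg-client pod, and stores the result in the pitr-target configmap (the same helpers are then used to INSERT a marker row and dump the table into the 07-add-more-data configmap). A condensed sketch of those pieces, with the helper bodies reconstructed from the xtrace rather than copied from the test's functions file:

NAMESPACE=kuttl-test-rational-lamprey     # namespace of this test run

# Decode connection details from the pguser secret.
get_psql_user_pass() { kubectl -n "${NAMESPACE}" get "secret/$1" '--template={{.data.password | base64decode}}'; }
get_psql_user_host() { kubectl -n "${NAMESPACE}" get "secret/$1" '--template={{.data.host | base64decode}}'; }

# Run a statement via the pg-client deployment, mirroring run_psql_local in the trace.
run_psql_local() {
    local command="$1" uri="$2" driver=postgres
    local client_pod
    client_pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=pg-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
        bash -c "printf '${command}\n' | psql -v ON_ERROR_STOP=1 -t -q ${driver}://'${uri}'"
}

# Record the PITR target (leading space stripped) for the restore in step 9.
PITR_TARGET_TIME=$(run_psql_local 'select now();' \
    "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)")
kubectl -n "${NAMESPACE}" create configmap pitr-target --from-literal=pitr="${PITR_TARGET_TIME/ /}"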
+++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | ++ run_psql_local 'select now();' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | ++ local 'command=select now();' logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | ++ local driver=postgres logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | +++ get_client_pod logger.go:42: 10:06:26 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:06:27 | scheduled-backup/8-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''select now();\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | + PITR_TARGET_TIME=' 2024-05-22 10:06:28.494697+00' logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | + kubectl -n kuttl-test-rational-lamprey create configmap pitr-target '--from-literal=pitr=2024-05-22 10:06:28.494697+00' logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | configmap/pitr-target created logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | ++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:28 | scheduled-backup/8-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | ++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | + run_psql_local '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | + local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | + local driver=postgres logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | ++ get_client_pod logger.go:42: 10:06:29 | scheduled-backup/8-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:06:30 | scheduled-backup/8-add-more-data | + kubectl -n kuttl-test-rational-lamprey exec 
pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:06:31 | scheduled-backup/8-add-more-data | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:06:31 | scheduled-backup/8-add-more-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:31 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | ++ local 'command=\c myapp \\\ SELECT * from myApp;' logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | ++ local driver=postgres logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | +++ get_client_pod logger.go:42: 10:06:32 | scheduled-backup/8-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:06:33 | scheduled-backup/8-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | + data=' 100500 logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | 100501' logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | + kubectl create configmap -n kuttl-test-rational-lamprey 07-add-more-data '--from-literal=data= 100500 logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | 100501' logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | configmap/07-add-more-data created logger.go:42: 10:06:34 | scheduled-backup/8-add-more-data | + sleep 30 logger.go:42: 10:07:06 | scheduled-backup/8-add-more-data | test step completed 8-add-more-data logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | starting test step 9-start-s3-restore logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | running command: [sh -c set -o errexit set -o xtrace source ../../functions cat << EOF > ${TEMP_DIR}/restore.yaml apiVersion: pgv2.percona.com/v2 kind: PerconaPGRestore metadata: name: s3-restore spec: pgCluster: scheduled-backup repoName: repo1 options: - --type=time - --target="$(kubectl -n ${NAMESPACE} get configmap pitr-target --template={{.data.pitr}})" EOF kubectl -n ${NAMESPACE} apply -f ${TEMP_DIR}/restore.yaml] logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | + 
source ../../functions logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ realpath ../../.. logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ pwd logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++ test_name=scheduled-backup logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ GIT_BRANCH=PR-772 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export PG_VER=16 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ PG_VER=16 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export 
IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export BUCKET=pg-operator-testing logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ BUCKET=pg-operator-testing logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ export PGOV1_VER=14 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ PGOV1_VER=14 logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ which gdate logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ which date logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ date=/usr/bin/date logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ which gsed logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++++ which sed logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ sed=/usr/bin/sed logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | +++ command -v oc logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++ oc get projects logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | + cat logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | ++ kubectl -n kuttl-test-rational-lamprey get configmap pitr-target '--template={{.data.pitr}}' logger.go:42: 10:07:06 | scheduled-backup/9-start-s3-restore | + kubectl -n kuttl-test-rational-lamprey apply -f /tmp/kuttl/pg/scheduled-backup/restore.yaml logger.go:42: 10:07:07 | scheduled-backup/9-start-s3-restore | perconapgrestore.pgv2.percona.com/s3-restore created logger.go:42: 10:08:28 | scheduled-backup/9-start-s3-restore | test step completed 9-start-s3-restore logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | starting test step 10-verify-restored-data logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | running command: [sh -c set -o 
errexit set -o xtrace source ../../functions data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl create configmap -n "${NAMESPACE}" 09-verify-restored-data --from-literal=data="${data}"] logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | + source ../../functions logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ realpath ../../.. logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ pwd logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++ test_name=scheduled-backup logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ GIT_BRANCH=PR-772 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:08:28 | 
scheduled-backup/10-verify-restored-data | +++ export PG_VER=16 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ PG_VER=16 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export BUCKET=pg-operator-testing logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ BUCKET=pg-operator-testing logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ export PGOV1_VER=14 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ PGOV1_VER=14 logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ which gdate logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ which date logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ date=/usr/bin/date logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ which gsed logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++++ which sed logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ sed=/usr/bin/sed logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ command -v oc logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | ++ oc get projects logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:08:28 | 
scheduled-backup/10-verify-restored-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:28 | scheduled-backup/10-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | ++ local 'command=\c myapp \\\ SELECT * from myApp;' logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | ++ local driver=postgres logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | +++ get_client_pod logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:08:29 | scheduled-backup/10-verify-restored-data | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:08:31 | scheduled-backup/10-verify-restored-data | + data=' 100500' logger.go:42: 10:08:31 | scheduled-backup/10-verify-restored-data | + kubectl create configmap -n kuttl-test-rational-lamprey 09-verify-restored-data '--from-literal=data= 100500' logger.go:42: 10:08:31 | scheduled-backup/10-verify-restored-data | configmap/09-verify-restored-data created logger.go:42: 10:08:32 | scheduled-backup/10-verify-restored-data | test step completed 10-verify-restored-data logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | starting test step 11-add-more-data logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_psql_local \ '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' \ "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)" data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl create configmap -n "${NAMESPACE}" 10-add-more-data --from-literal=data="${data}" sleep 30 # wait for wal to get archived] logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | + source ../../functions logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ realpath ../../.. 
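The s3 restore in step 9 (and the gcs restore later in step 12) is driven by a PerconaPGRestore resource that the step renders into ${TEMP_DIR}/restore.yaml; the only moving parts are the repository name and the PITR target captured in step 8. A minimal unflattened sketch of what this run generated for repo1 (namespace, cluster, and configmap names are the ones logged above; the YAML nesting is inferred from the flattened heredoc, so treat it as illustrative rather than the canonical template):

    # Read the point-in-time target recorded by step 8, then request a time-based restore from repo1.
    NAMESPACE=kuttl-test-rational-lamprey
    TARGET=$(kubectl -n "${NAMESPACE}" get configmap pitr-target --template='{{.data.pitr}}')
    cat <<EOF | kubectl -n "${NAMESPACE}" apply -f -
    apiVersion: pgv2.percona.com/v2
    kind: PerconaPGRestore
    metadata:
      name: s3-restore
    spec:
      pgCluster: scheduled-backup
      repoName: repo1
      options:
      - --type=time
      - --target="${TARGET}"
    EOF

Step 12 repeats the same request with name gcs-restore and repoName repo2 against the same target timestamp.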
logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ pwd logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ test_name=scheduled-backup logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ GIT_BRANCH=PR-772 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export PG_VER=16 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ PG_VER=16 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ 
IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export BUCKET=pg-operator-testing logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ BUCKET=pg-operator-testing logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ export PGOV1_VER=14 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ PGOV1_VER=14 logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ which gdate logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ which date logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ date=/usr/bin/date logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ which gsed logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++++ which sed logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ sed=/usr/bin/sed logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | +++ command -v oc logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ oc get projects logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:32 | scheduled-backup/11-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | ++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | ++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | + run_psql_local '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' 
postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | + local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | + local driver=postgres logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | ++ get_client_pod logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:08:33 | scheduled-backup/11-add-more-data | + kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:08:35 | scheduled-backup/11-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | ++ local 'command=\c myapp \\\ SELECT * from myApp;' logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | ++ local driver=postgres logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | +++ get_client_pod logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:08:36 | scheduled-backup/11-add-more-data | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | + data=' 100500 logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | 100501' logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | + kubectl create configmap -n kuttl-test-rational-lamprey 10-add-more-data '--from-literal=data= 100500 logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | 100501' logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | 
configmap/10-add-more-data created logger.go:42: 10:08:38 | scheduled-backup/11-add-more-data | + sleep 30 logger.go:42: 10:09:09 | scheduled-backup/11-add-more-data | test step completed 11-add-more-data logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | starting test step 12-start-gcs-restore logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | running command: [sh -c set -o errexit set -o xtrace source ../../functions cat << EOF > ${TEMP_DIR}/restore.yaml apiVersion: pgv2.percona.com/v2 kind: PerconaPGRestore metadata: name: gcs-restore spec: pgCluster: scheduled-backup repoName: repo2 options: - --type=time - --target="$(kubectl -n ${NAMESPACE} get configmap pitr-target --template={{.data.pitr}})" EOF kubectl -n ${NAMESPACE} apply -f ${TEMP_DIR}/restore.yaml] logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | + source ../../functions logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ realpath ../../.. logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ pwd logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++ test_name=scheduled-backup logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ GIT_BRANCH=PR-772 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:09:09 | 
scheduled-backup/12-start-gcs-restore | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export PG_VER=16 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ PG_VER=16 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export BUCKET=pg-operator-testing logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ BUCKET=pg-operator-testing logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ export PGOV1_VER=14 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ PGOV1_VER=14 logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ which gdate logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ which date logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ date=/usr/bin/date logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ which gsed logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++++ which sed logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | +++ sed=/usr/bin/sed logger.go:42: 10:09:09 | 
scheduled-backup/12-start-gcs-restore | +++ command -v oc logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++ oc get projects logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | + cat logger.go:42: 10:09:09 | scheduled-backup/12-start-gcs-restore | ++ kubectl -n kuttl-test-rational-lamprey get configmap pitr-target '--template={{.data.pitr}}' logger.go:42: 10:09:10 | scheduled-backup/12-start-gcs-restore | + kubectl -n kuttl-test-rational-lamprey apply -f /tmp/kuttl/pg/scheduled-backup/restore.yaml logger.go:42: 10:09:10 | scheduled-backup/12-start-gcs-restore | perconapgrestore.pgv2.percona.com/gcs-restore created logger.go:42: 10:10:10 | scheduled-backup/12-start-gcs-restore | test step completed 12-start-gcs-restore logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | starting test step 13-verify-restored-data logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass scheduled-backup-pguser-postgres)@$(get_psql_user_host scheduled-backup-pguser-postgres)") kubectl create configmap -n "${NAMESPACE}" 12-verify-restored-data --from-literal=data="${data}"] logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | + source ../../functions logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ realpath ../../.. logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ pwd logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ test_name=scheduled-backup logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ git rev-parse 
--abbrev-ref HEAD logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ GIT_BRANCH=PR-772 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export PG_VER=16 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ PG_VER=16 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export BUCKET=pg-operator-testing logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ BUCKET=pg-operator-testing logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ export PGOV1_VER=14 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ PGOV1_VER=14 logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ which gdate logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | which: no gdate in 
(/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ which date logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ date=/usr/bin/date logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ which gsed logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++++ which sed logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ sed=/usr/bin/sed logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ command -v oc logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ oc get projects logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ get_psql_user_pass scheduled-backup-pguser-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ get_psql_user_host scheduled-backup-pguser-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ local secret_name=scheduled-backup-pguser-postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get secret/scheduled-backup-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ local 'command=\c myapp \\\ SELECT * from myApp;' logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ local uri=postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | ++ local driver=postgres logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ get_client_pod logger.go:42: 10:10:10 | scheduled-backup/13-verify-restored-data | +++ kubectl -n kuttl-test-rational-lamprey get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:10:11 | scheduled-backup/13-verify-restored-data | ++ kubectl -n kuttl-test-rational-lamprey exec pg-client-5b6b7b7b78-7q62t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:6sNVKIeYDxUJSQWrnuvqzN0s@scheduled-backup-primary.kuttl-test-rational-lamprey.svc'\''' logger.go:42: 10:10:12 | scheduled-backup/13-verify-restored-data | + data=' 100500' logger.go:42: 10:10:12 | scheduled-backup/13-verify-restored-data | + kubectl create configmap -n kuttl-test-rational-lamprey 12-verify-restored-data '--from-literal=data= 100500' logger.go:42: 10:10:13 | scheduled-backup/13-verify-restored-data | configmap/12-verify-restored-data created logger.go:42: 10:10:14 | scheduled-backup/13-verify-restored-data | test step completed 13-verify-restored-data logger.go:42: 10:10:14 | 
scheduled-backup/14-check-pgbackup-job | starting test step 14-check-pgbackup-job logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_jobs_and_pgbackups "scheduled-backup" succeeded_backups=$(kubectl get -n "$NAMESPACE" pg-backup -o yaml | yq '.items | map(select(.status.state == "Succeeded")) | length') succeeded_jobs=$(kubectl get -n "$NAMESPACE" job -o yaml | yq '.items | map(select(.status.succeeded == "1")) | length') if [[ $succeeded_backups -lt 4 ]]; then echo "ERROR: there are less than 4 succeeded pg-backups" exit 1 fi if [[ $succeeded_backups != "$succeeded_jobs" ]]; then echo "ERROR: succeeded pg-backups != succeeded jobs" exit 1 fi] logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + source ../../functions logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ realpath ../../.. logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ pwd logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ test_name=scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ GIT_BRANCH=PR-772 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:14 | 
scheduled-backup/14-check-pgbackup-job | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export PG_VER=16 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ PG_VER=16 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export BUCKET=pg-operator-testing logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ BUCKET=pg-operator-testing logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ export PGOV1_VER=14 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ PGOV1_VER=14 logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ which gdate logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ which date logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ date=/usr/bin/date logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ which gsed logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++++ which sed logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ sed=/usr/bin/sed 
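All of the data checks in this suite go through the pg-client deployment rather than the database pods directly: run_psql_local (defined in e2e-tests/functions) looks up the postgres password and host from the scheduled-backup-pguser-postgres secret, locates the pg-client pod, and pipes the SQL into psql over a postgres:// URI. A rough standalone equivalent of the kubectl exec lines logged above, assuming the same secret layout and client deployment as in this run:

    # Resolve connection details from the pguser secret, then run a query through the pg-client pod.
    NAMESPACE=kuttl-test-rational-lamprey
    PASS=$(kubectl -n "${NAMESPACE}" get secret/scheduled-backup-pguser-postgres --template='{{.data.password | base64decode}}')
    HOST=$(kubectl -n "${NAMESPACE}" get secret/scheduled-backup-pguser-postgres --template='{{.data.host | base64decode}}')
    POD=$(kubectl -n "${NAMESPACE}" get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}')
    SQL='\c myapp \\\ SELECT * from myApp;'
    kubectl -n "${NAMESPACE}" exec "${POD}" -- bash -c "printf '${SQL}\n' | psql -v ON_ERROR_STOP=1 -t -q postgres://postgres:${PASS}@${HOST}"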
logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | +++ command -v oc logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ oc get projects logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + check_jobs_and_pgbackups scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + local cluster=scheduled-backup logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + job_names=() logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + kubectl get job -n kuttl-test-rational-lamprey -o 'jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}' logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + IFS= logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + read -r line logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + job_names+=("$line") logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + IFS= logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + read -r line logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + job_names+=("$line") logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + IFS= logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + read -r line logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + job_names+=("$line") logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + IFS= logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + read -r line logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + job_names+=("$line") logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + IFS= logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + read -r line logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + (( i = 0 )) logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | + (( i < 0 )) logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ kubectl get -n kuttl-test-rational-lamprey pg-backup -o yaml logger.go:42: 10:10:14 | scheduled-backup/14-check-pgbackup-job | ++ yq '.items | map(select(.status.state == "Succeeded")) | length' logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | + succeeded_backups=4 logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | ++ kubectl get -n kuttl-test-rational-lamprey job -o yaml logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | ++ yq '.items | map(select(.status.succeeded == "1")) | length' logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | + succeeded_jobs=4 logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | + [[ 4 -lt 4 ]] logger.go:42: 10:10:15 | scheduled-backup/14-check-pgbackup-job | + [[ 4 != \4 ]] logger.go:42: 10:10:16 | scheduled-backup/14-check-pgbackup-job | test step completed 14-check-pgbackup-job logger.go:42: 10:10:16 | scheduled-backup/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ realpath ../../.. 
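The step 14 assertion is easier to follow spelled out line by line: it counts pg-backup resources that reached state Succeeded, counts Jobs whose status reports success, requires at least four successful backups, and requires the two counts to match. This is the same script as in the step definition above (yq v4 expression syntax), with the namespace fixed to the one used in this run:

    # Count successful pg-backup resources and successful Jobs, then compare them.
    NAMESPACE=kuttl-test-rational-lamprey
    succeeded_backups=$(kubectl get -n "${NAMESPACE}" pg-backup -o yaml | yq '.items | map(select(.status.state == "Succeeded")) | length')
    succeeded_jobs=$(kubectl get -n "${NAMESPACE}" job -o yaml | yq '.items | map(select(.status.succeeded == "1")) | length')
    if [[ $succeeded_backups -lt 4 ]]; then
        echo "ERROR: there are less than 4 succeeded pg-backups"
        exit 1
    fi
    if [[ $succeeded_backups != "$succeeded_jobs" ]]; then
        echo "ERROR: succeeded pg-backups != succeeded jobs"
        exit 1
    fi

In the run above both counts came back as 4, so both guards passed and the step completed.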
logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/tests/scheduled-backup logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++ test_name=scheduled-backup logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/vars.sh logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-772 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/deploy logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-772/e2e-tests/conf logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/pg/scheduled-backup logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-772 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-772 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export VERSION=PR-772-bf00908b7 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ VERSION=PR-772-bf00908b7 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-postgresql-operator:PR-772-bf00908b7 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export PG_VER=16 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ PG_VER=16 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ 
IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-ppg16-pgbouncer logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg16-postgres logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-ppg16-pgbackrest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export BUCKET=pg-operator-testing logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ BUCKET=pg-operator-testing logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export PGOV1_TAG=1.4.0 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ PGOV1_TAG=1.4.0 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ export PGOV1_VER=14 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ PGOV1_VER=14 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ which date logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ which gsed logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-772/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++++ which sed logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ sed=/usr/bin/sed logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | +++ command -v oc logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | ++ oc get projects logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | + kubectl -n pg-operator delete deployment percona-postgresql-operator --force --grace-period=0 logger.go:42: 10:10:24 | scheduled-backup/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the 
running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 10:10:25 | scheduled-backup/99-remove-cluster-gracefully | deployment.apps "percona-postgresql-operator" force deleted logger.go:42: 10:10:25 | scheduled-backup/99-remove-cluster-gracefully | + [[ -n pg-operator ]] logger.go:42: 10:10:25 | scheduled-backup/99-remove-cluster-gracefully | + kubectl delete namespace pg-operator --force --grace-period=0 logger.go:42: 10:10:25 | scheduled-backup/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 10:10:25 | scheduled-backup/99-remove-cluster-gracefully | namespace "pg-operator" force deleted logger.go:42: 10:10:32 | scheduled-backup/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 10:10:32 | scheduled-backup | scheduled-backup events from ns kuttl-test-rational-lamprey: logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:28 +0000 UTC Normal Pod pg-client-5b6b7b7b78-7q62t Scheduled Successfully assigned kuttl-test-rational-lamprey/pg-client-5b6b7b7b78-7q62t to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:28 +0000 UTC Normal ReplicaSet.apps pg-client-5b6b7b7b78 SuccessfulCreate Created pod: pg-client-5b6b7b7b78-7q62t replicaset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:28 +0000 UTC Normal Deployment.apps pg-client ScalingReplicaSet Scaled up replica set pg-client-5b6b7b7b78 to 1 deployment-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:29 +0000 UTC Normal Pod pg-client-5b6b7b7b78-7q62t.spec.containers{pg-client} Pulling Pulling image "perconalab/percona-distribution-postgresql:15" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:29 +0000 UTC Normal Pod pg-client-5b6b7b7b78-7q62t.spec.containers{pg-client} Pulled Successfully pulled image "perconalab/percona-distribution-postgresql:15" in 98.215998ms (98.236663ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:29 +0000 UTC Normal Pod pg-client-5b6b7b7b78-7q62t.spec.containers{pg-client} Created Created container pg-client kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:29 +0000 UTC Normal Pod pg-client-5b6b7b7b78-7q62t.spec.containers{pg-client} Started Started container pg-client kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-7pwd-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-7pwd-pgdata ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-7pwd-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-rational-lamprey/scheduled-backup-instance1-7pwd-pgdata" pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | 
scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-7pwd SuccessfulCreate create Pod scheduled-backup-instance1-7pwd-0 in StatefulSet scheduled-backup-instance1-7pwd successful statefulset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-lxpf-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-lxpf-pgdata ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-lxpf-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-rational-lamprey/scheduled-backup-instance1-lxpf-pgdata" pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-lxpf SuccessfulCreate create Pod scheduled-backup-instance1-lxpf-0 in StatefulSet scheduled-backup-instance1-lxpf successful statefulset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-nsn7-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:33 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-nsn7 SuccessfulCreate create Pod scheduled-backup-instance1-nsn7-0 in StatefulSet scheduled-backup-instance1-nsn7 successful statefulset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-nsn7-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-rational-lamprey/scheduled-backup-instance1-nsn7-pgdata" pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-nsn7-pgdata ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-pgbouncer-5c64475cff-728sz to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-pgbouncer-5c64475cff-smqkh to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-thzf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-pgbouncer-5c64475cff-x4z6n to 
gke-jen-pg-772-bf00908b7-default-pool-006d69c5-vz2w default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal ReplicaSet.apps scheduled-backup-pgbouncer-5c64475cff SuccessfulCreate Created pod: scheduled-backup-pgbouncer-5c64475cff-728sz replicaset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal ReplicaSet.apps scheduled-backup-pgbouncer-5c64475cff SuccessfulCreate Created pod: scheduled-backup-pgbouncer-5c64475cff-x4z6n replicaset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal ReplicaSet.apps scheduled-backup-pgbouncer-5c64475cff SuccessfulCreate Created pod: scheduled-backup-pgbouncer-5c64475cff-smqkh replicaset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal Deployment.apps scheduled-backup-pgbouncer ScalingReplicaSet Scaled up replica set scheduled-backup-pgbouncer-5c64475cff to 3 deployment-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:34 +0000 UTC Normal PodDisruptionBudget.policy scheduled-backup-pgbouncer NoPods No matching pods found controllermanager logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 85.295672ms (85.314538ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer} Created Created container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 96.890235ms (96.907342ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer-config} Created Created container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer} Pulled Successfully 
pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 107.451909ms (107.470391ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer} Created Created container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 78.880702ms (78.900574ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer-config} Created Created container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:35 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:36 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-7pwd-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-vz2w default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-7pwd-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-af79a79c-5f46-414c-9ad6-d03d142e274b pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-lxpf-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-5976192b-cbc8-4f52-a14a-5dd17014a494 pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-nsn7-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-thzf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal PersistentVolumeClaim scheduled-backup-instance1-nsn7-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-a2be3d20-6dd1-4039-bcaf-b61bc96dedc9 pd.csi.storage.gke.io_gke-07e3a5b96be84f029e3a-4572-e93e-vm_1e8061a7-7e39-4c16-b556-823f3508732f logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer} Pulled Successfully pulled image 
"perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 110.252685ms (110.275871ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer} Created Created container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbouncer" in 86.591623ms (86.606757ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer-config} Created Created container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:37 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:38 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-lxpf-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:41 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-af79a79c-5f46-414c-9ad6-d03d142e274b" attachdetach-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:42 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-5976192b-cbc8-4f52-a14a-5dd17014a494" attachdetach-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 107.371038ms (107.391187ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | 
scheduled-backup | 2024-05-22 09:49:44 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 101.70158ms (101.710448ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:44 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:44 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:44 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 86.981258ms (87.001856ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Created Created container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Started Started container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 115.561707ms (115.577866ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 113.891158ms (113.908996ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Started Started container postgres-startup 
kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:45 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a2be3d20-6dd1-4039-bcaf-b61bc96dedc9" attachdetach-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:46 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:46 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 110.2786ms (110.293788ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:46 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:46 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 104.706959ms (104.716475ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Created Created container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Started Started container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 106.835708ms (106.844173ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulled Successfully pulled 
image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 114.342801ms (114.359178ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 97.821772ms (97.838256ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:47 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 99.257009ms (99.274651ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Created Created container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Started Started container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 98.894659ms (98.910789ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:48 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:49 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:50 +0000 UTC Warning Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} 
Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:51 +0000 UTC Warning Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:52 +0000 UTC Normal Pod scheduled-backup-backup-7clh-7hwbv Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-backup-7clh-7hwbv to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:52 +0000 UTC Normal Job.batch scheduled-backup-backup-7clh SuccessfulCreate Created pod: scheduled-backup-backup-7clh-7hwbv job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:52 +0000 UTC Normal PostgresCluster.postgres-operator.crunchydata.com scheduled-backup StanzasCreated pgBackRest stanza creation completed successfully postgrescluster-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:53 +0000 UTC Normal Pod scheduled-backup-backup-7clh-7hwbv.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:53 +0000 UTC Normal Pod scheduled-backup-backup-7clh-7hwbv.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 108.085179ms (108.110563ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:53 +0000 UTC Normal Pod scheduled-backup-backup-7clh-7hwbv.spec.containers{pgbackrest} Created Created container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:49:53 +0000 UTC Normal Pod scheduled-backup-backup-7clh-7hwbv.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:51:22 +0000 UTC Normal Job.batch scheduled-backup-backup-7clh Completed Job completed job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:39 +0000 UTC Normal Pod scheduled-backup-backup-lk97-fmscl Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-backup-lk97-fmscl to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:39 +0000 UTC Normal Job.batch scheduled-backup-backup-lk97 SuccessfulCreate Created pod: scheduled-backup-backup-lk97-fmscl job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:40 +0000 UTC Normal Pod scheduled-backup-backup-lk97-fmscl.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:40 +0000 UTC Normal Pod scheduled-backup-backup-lk97-fmscl.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 115.349098ms (115.388435ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:40 +0000 UTC Normal Pod scheduled-backup-backup-lk97-fmscl.spec.containers{pgbackrest} Created Created container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 09:55:40 +0000 UTC Normal Pod scheduled-backup-backup-lk97-fmscl.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 
10:10:32 | scheduled-backup | 2024-05-22 09:58:45 +0000 UTC Normal Job.batch scheduled-backup-backup-lk97 Completed Job completed job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:39 +0000 UTC Normal Pod scheduled-backup-backup-9495-59c4w Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-backup-9495-59c4w to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:39 +0000 UTC Normal Job.batch scheduled-backup-backup-9495 SuccessfulCreate Created pod: scheduled-backup-backup-9495-59c4w job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:40 +0000 UTC Normal Pod scheduled-backup-backup-9495-59c4w.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:40 +0000 UTC Normal Pod scheduled-backup-backup-9495-59c4w.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 110.166404ms (110.180906ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:40 +0000 UTC Normal Pod scheduled-backup-backup-9495-59c4w.spec.containers{pgbackrest} Created Created container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:00:40 +0000 UTC Normal Pod scheduled-backup-backup-9495-59c4w.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:02:06 +0000 UTC Normal Job.batch scheduled-backup-backup-9495 Completed Job completed job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:21 +0000 UTC Normal Pod scheduled-backup-backup-9wlt-l9b6x Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-backup-9wlt-l9b6x to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:21 +0000 UTC Normal Job.batch scheduled-backup-backup-9wlt SuccessfulCreate Created pod: scheduled-backup-backup-9wlt-l9b6x job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:22 +0000 UTC Normal Pod scheduled-backup-backup-9wlt-l9b6x.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:22 +0000 UTC Normal Pod scheduled-backup-backup-9wlt-l9b6x.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 110.631996ms (110.67316ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:22 +0000 UTC Normal Pod scheduled-backup-backup-9wlt-l9b6x.spec.containers{pgbackrest} Created Created container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:03:22 +0000 UTC Normal Pod scheduled-backup-backup-9wlt-l9b6x.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:06:20 +0000 UTC Normal Job.batch scheduled-backup-backup-9wlt Completed Job completed job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:07 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Killing Stopping container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:07 +0000 
UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:07 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Killing Stopping container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:07 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:08 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Killing Stopping container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:08 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:10 +0000 UTC Warning Endpoints scheduled-backup-pods FailedToUpdateEndpoint Failed to update endpoint kuttl-test-rational-lamprey/scheduled-backup-pods: Operation cannot be fulfilled on endpoints "scheduled-backup-pods": the object has been modified; please apply your changes to the latest version and try again endpoint-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:12 +0000 UTC Normal PodDisruptionBudget.policy scheduled-backup-set-instance1 NoPods No matching pods found controllermanager logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:14 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-pgbackrest-restore-f4lmc to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:14 +0000 UTC Warning Pod scheduled-backup-pgbackrest-restore-f4lmc FailedAttachVolume Multi-Attach error for volume "pvc-af79a79c-5f46-414c-9ad6-d03d142e274b" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:14 +0000 UTC Normal Job.batch scheduled-backup-pgbackrest-restore SuccessfulCreate Created pod: scheduled-backup-pgbackrest-restore-f4lmc job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:34 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-af79a79c-5f46-414c-9ad6-d03d142e274b" attachdetach-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:36 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:36 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 110.921263ms (111.024183ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:36 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:36 +0000 UTC Normal Pod 
scheduled-backup-pgbackrest-restore-f4lmc.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:37 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.containers{pgbackrest-restore} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:37 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.containers{pgbackrest-restore} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 100.032395ms (100.048876ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:37 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.containers{pgbackrest-restore} Created Created container pgbackrest-restore kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:37 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-f4lmc.spec.containers{pgbackrest-restore} Started Started container pgbackrest-restore kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:50 +0000 UTC Normal Job.batch scheduled-backup-pgbackrest-restore Completed Job completed job-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:51 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-7pwd-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:51 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-7pwd SuccessfulCreate create Pod scheduled-backup-instance1-7pwd-0 in StatefulSet scheduled-backup-instance1-7pwd successful statefulset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:52 +0000 UTC Warning Pod scheduled-backup-instance1-7pwd-0 FailedMount MountVolume.SetUp failed for volume "patroni-config" : failed to sync secret cache: timed out waiting for the condition kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:58 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:58 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 107.650393ms (107.665091ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:58 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:58 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:59 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:59 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image 
"perconalab/percona-postgresql-operator:main-ppg16-postgres" in 96.859625ms (96.878141ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:59 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:07:59 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 85.232238ms (85.251754ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Created Created container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Started Started container database kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 89.963708ms (89.972457ms including waiting) kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:00 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:07 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-lxpf-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-thzf default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:07 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-lxpf SuccessfulCreate create Pod scheduled-backup-instance1-lxpf-0 in StatefulSet scheduled-backup-instance1-lxpf successful statefulset-controller logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:07 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-nsn7-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-vz2w default-scheduler logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:07 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-nsn7 SuccessfulCreate create Pod scheduled-backup-instance1-nsn7-0 in StatefulSet scheduled-backup-instance1-nsn7 successful statefulset-controller logger.go:42: 
10:10:32 | scheduled-backup | 2024-05-22 10:08:15 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-5976192b-cbc8-4f52-a14a-5dd17014a494" attachdetach-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:15 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a2be3d20-6dd1-4039-bcaf-b61bc96dedc9" attachdetach-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 115.028133ms (115.04399ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 110.704021ms (110.720669ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:16 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 97.858065ms (97.871473ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 85.794958ms (85.811559ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:17 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 94.103113ms (94.119677ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Created Created container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 87.823998ms (87.841896ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Created Created container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 92.539975ms (92.548383ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:19 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 86.211628ms (86.219567ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:19 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:19 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:08:21 +0000 UTC Warning Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:11 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:14 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-pgbackrest-restore-7d6lv to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:14 +0000 UTC Normal Job.batch scheduled-backup-pgbackrest-restore SuccessfulCreate Created pod: scheduled-backup-pgbackrest-restore-7d6lv job-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:19 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-pgbackrest" in 119.132094ms (119.151023ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.containers{pgbackrest-restore} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.containers{pgbackrest-restore} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 95.491565ms (95.507642ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.containers{pgbackrest-restore} Created Created container pgbackrest-restore kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:20 +0000 UTC Normal Pod scheduled-backup-pgbackrest-restore-7d6lv.spec.containers{pgbackrest-restore} Started Started container pgbackrest-restore kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:35 +0000 UTC Normal Job.batch scheduled-backup-pgbackrest-restore Completed Job completed job-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:36 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-7pwd-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-4jqf default-scheduler
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:36 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-7pwd SuccessfulCreate create Pod scheduled-backup-instance1-7pwd-0 in StatefulSet scheduled-backup-instance1-7pwd successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:40 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:40 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 89.293024ms (89.312019ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:40 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:40 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:41 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:41 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 101.956948ms (101.971503ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:41 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:41 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:42 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:42 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 86.18945ms (86.197319ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:42 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Created Created container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:42 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:42 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 90.102199ms (90.12647ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:43 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:49 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-lxpf SuccessfulCreate create Pod scheduled-backup-instance1-lxpf-0 in StatefulSet scheduled-backup-instance1-lxpf successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:50 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-lxpf-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-vz2w default-scheduler
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:50 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 Scheduled Successfully assigned kuttl-test-rational-lamprey/scheduled-backup-instance1-nsn7-0 to gke-jen-pg-772-bf00908b7-default-pool-006d69c5-thzf default-scheduler
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:50 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-nsn7 SuccessfulCreate create Pod scheduled-backup-instance1-nsn7-0 in StatefulSet scheduled-backup-instance1-nsn7 successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:55 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a2be3d20-6dd1-4039-bcaf-b61bc96dedc9" attachdetach-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:56 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:56 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 114.753391ms (114.782ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:56 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:56 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:57 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-5976192b-cbc8-4f52-a14a-5dd17014a494" attachdetach-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:57 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:57 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 137.779547ms (137.800883ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:57 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:57 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 112.568419ms (112.58493ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Created Created container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 92.864323ms (92.88039ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Created Created container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 90.772907ms (90.790807ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:58 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:59 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:59 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:59 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 82.495907ms (82.503758ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:59 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Created Created container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:09:59 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 170.118086ms (170.13168ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Created Created container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg16-postgres" kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg16-postgres" in 109.686271ms (109.695066ms including waiting) kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Created Created container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:00 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Started Started container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:01 +0000 UTC Warning Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:03 +0000 UTC Warning Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:17 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:17 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-lxpf SuccessfulDelete delete Pod scheduled-backup-instance1-lxpf-0 in StatefulSet scheduled-backup-instance1-lxpf successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:18 +0000 UTC Normal Pod scheduled-backup-instance1-lxpf-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:18 +0000 UTC Warning Pod scheduled-backup-instance1-lxpf-0.spec.containers{database} Unhealthy Readiness probe failed: Get "https://10.75.113.15:8008/readiness": read tcp 10.75.113.1:40446->10.75.113.15:8008: read: connection reset by peer kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:18 +0000 UTC Normal Pod scheduled-backup-instance1-nsn7-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:18 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-nsn7 SuccessfulDelete delete Pod scheduled-backup-instance1-nsn7-0 in StatefulSet scheduled-backup-instance1-nsn7 successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:20 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:20 +0000 UTC Normal Pod scheduled-backup-instance1-7pwd-0.spec.containers{replication-cert-copy} Killing Stopping container replication-cert-copy kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:20 +0000 UTC Normal StatefulSet.apps scheduled-backup-instance1-7pwd SuccessfulDelete delete Pod scheduled-backup-instance1-7pwd-0 in StatefulSet scheduled-backup-instance1-7pwd successful statefulset-controller
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-728sz.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-smqkh.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet
logger.go:42: 10:10:32 | scheduled-backup | 2024-05-22 10:10:25 +0000 UTC Normal Pod scheduled-backup-pgbouncer-5c64475cff-x4z6n.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet
logger.go:42: 10:10:32 | scheduled-backup | Deleting namespace: kuttl-test-rational-lamprey
=== CONT kuttl
    harness.go:405: run tests finished
    harness.go:513: cleaning up
    harness.go:570: removing temp folder: ""
--- PASS: kuttl (1313.21s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/scheduled-backup (1310.42s)
PASS