=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.132.123.183
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 23 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/major-upgrade
=== PAUSE kuttl/harness/major-upgrade
=== CONT kuttl/harness/major-upgrade
logger.go:42: 16:16:33 | major-upgrade | Creating namespace: kuttl-test-mutual-maggot
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_client
deploy_s3_secrets]
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + source ../../functions
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ realpath ../../..
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ pwd
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++ test_name=major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ GIT_BRANCH=PR-988
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export PG_VER=17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ PG_VER=17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ BUCKET=pg-operator-testing
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ export PGOV1_VER=14
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ PGOV1_VER=14
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ which gdate
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ which date
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ which gsed
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++++ which sed
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ sed=/usr/bin/sed
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | +++ command -v oc
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | ++ oc get projects
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + init_temp_dir
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + rm -rf /tmp/kuttl/pg/major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + mkdir -p /tmp/kuttl/pg/major-upgrade
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + deploy_operator
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + local cw_prefix=
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + destroy_operator
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | + kubectl -n pg-operator delete deployment percona-postgresql-operator --force --grace-period=0
logger.go:42: 16:16:33 | major-upgrade/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 16:16:34 | major-upgrade/0-deploy-operator | deployment.apps "percona-postgresql-operator" force deleted
logger.go:42: 16:16:34 | major-upgrade/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 16:16:34 | major-upgrade/0-deploy-operator | + kubectl delete namespace pg-operator --force --grace-period=0
logger.go:42: 16:16:34 | major-upgrade/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 16:16:34 | major-upgrade/0-deploy-operator | namespace "pg-operator" force deleted
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + [[ -n pg-operator ]]
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + create_namespace pg-operator
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + local namespace=pg-operator
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + kubectl delete namespace pg-operator --ignore-not-found
logger.go:42: 16:16:40 | major-upgrade/0-deploy-operator | + kubectl wait --for=delete namespace pg-operator
logger.go:42: 16:16:41 | major-upgrade/0-deploy-operator | + kubectl create namespace pg-operator
logger.go:42: 16:16:41 | major-upgrade/0-deploy-operator | namespace/pg-operator created
logger.go:42: 16:16:41 | major-upgrade/0-deploy-operator | + cw_prefix=cw-
logger.go:42: 16:16:41 | major-upgrade/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy/crd.yaml
logger.go:42: 16:16:42 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/crunchybridgeclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:16:42 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgbackups.pgv2.percona.com serverside-applied
logger.go:42: 16:16:44 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgclusters.pgv2.percona.com serverside-applied
logger.go:42: 16:16:45 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgrestores.pgv2.percona.com serverside-applied
logger.go:42: 16:16:45 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconapgupgrades.pgv2.percona.com serverside-applied
logger.go:42: 16:16:45 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgadmins.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:16:46 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/pgupgrades.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:16:48 | major-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/postgresclusters.postgres-operator.crunchydata.com serverside-applied
logger.go:42: 16:16:48 | major-upgrade/0-deploy-operator | + kubectl -n pg-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy/cw-rbac.yaml
logger.go:42: 16:16:48 | major-upgrade/0-deploy-operator | serviceaccount/percona-postgresql-operator serverside-applied
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-postgresql-operator serverside-applied
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | + local disable_telemetry=true
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | + '[' major-upgrade == telemetry-transfer ']'
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | + yq eval '.spec.template.spec.containers[0].image = "perconalab/percona-postgresql-operator:PR-988-1f8703f6e"' /mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy/cw-operator.yaml
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | + yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' -
logger.go:42: 16:16:49 | major-upgrade/0-deploy-operator | + kubectl -n pg-operator apply -f -
logger.go:42: 16:16:50 | major-upgrade/0-deploy-operator | deployment.apps/percona-postgresql-operator created
logger.go:42: 16:16:50 | major-upgrade/0-deploy-operator | + deploy_client
logger.go:42: 16:16:50 | major-upgrade/0-deploy-operator | + kubectl -n kuttl-test-mutual-maggot apply -f /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf/client.yaml
logger.go:42: 16:16:51 | major-upgrade/0-deploy-operator | deployment.apps/pg-client created
logger.go:42: 16:16:51 | major-upgrade/0-deploy-operator | + deploy_s3_secrets
logger.go:42: 16:16:51 | major-upgrade/0-deploy-operator | + set +o xtrace
logger.go:42: 16:16:52 | major-upgrade/0-deploy-operator | secret/aws-s3-secret created
logger.go:42: 16:16:52 | major-upgrade/0-deploy-operator | secret/gcp-cs-secret created
logger.go:42: 16:16:52 | major-upgrade/0-deploy-operator | secret/azure-secret created
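Worth pulling out of the trace above: deploy_operator does not apply deploy/cw-operator.yaml verbatim. It pipes the manifest through two yq passes, one pinning the operator image to the PR build and one forcing DISABLE_TELEMETRY, before kubectl ever sees it. A minimal standalone sketch of that pipeline, assuming yq v4 and the repo layout shown in the log (IMAGE is the build tag from the trace):

    # Sketch: pin the operator image, disable telemetry, then apply.
    IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
    yq eval ".spec.template.spec.containers[0].image = \"${IMAGE}\"" deploy/cw-operator.yaml \
      | yq eval '(.spec.template.spec.containers[] | select(.name=="operator") | .env[] | select(.name=="DISABLE_TELEMETRY") | .value) = "true"' - \
      | kubectl -n pg-operator apply -f -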
logger.go:42: 16:16:53 | major-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:16:53 | major-upgrade/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:16:53 | major-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 16:16:55 | major-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:16:55 | major-upgrade/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:16:55 | major-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 16:16:56 | major-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:16:56 | major-upgrade/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:16:57 | major-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 16:16:58 | major-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:16:58 | major-upgrade/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:16:59 | major-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-postgresql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | ASSERT deployment percona-postgresql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | NAME                          NAMESPACE     COL0
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | percona-postgresql-operator   pg-operator   1
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | ASSERT PASS
logger.go:42: 16:17:00 | major-upgrade/0-deploy-operator | test step completed 0-deploy-operator
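The readiness gate in this step comes from the kubectl-assert plugin, which the harness simply re-runs until the deployment reports a ready replica (four FAILs above, then PASS at 16:17:00). A plain-kubectl equivalent, as a sketch, would block on the Deployment's Available condition instead of polling; the 120s timeout here is an arbitrary choice, not a value taken from the test:

    kubectl -n pg-operator wait deployment/percona-postgresql-operator \
      --for=condition=Available --timeout=120s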
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '
.spec.postgresVersion = 13 |
.spec.image = "perconalab/percona-postgresql-operator:main-ppg13-postgres" |
.spec.instances[0].dataVolumeClaimSpec.resources.requests.storage = "3Gi" |
.spec.patroni.createReplicaMethods = ["basebackup","pgbackrest"] |
.spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" |
.spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest"' \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + source ../../functions
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ realpath ../../..
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ pwd
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++ test_name=major-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ GIT_BRANCH=PR-988
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export PG_VER=17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ PG_VER=17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ BUCKET=pg-operator-testing
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ export PGOV1_VER=14
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ PGOV1_VER=14
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ which gdate
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ which date
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ date=/usr/bin/date
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ which gsed
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++++ which sed
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ sed=/usr/bin/sed
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | +++ command -v oc
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ++ oc get projects
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + get_cr
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + local cr_name=
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + '[' -z ']'
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + cr_name=major-upgrade
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + local repo_path=
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + yq eval '
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.postgresVersion = 13 |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.image = "perconalab/percona-postgresql-operator:main-ppg13-postgres" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.instances[0].dataVolumeClaimSpec.resources.requests.storage = "3Gi" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.patroni.createReplicaMethods = ["basebackup","pgbackrest"] |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest"'
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + yq eval '
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .metadata.name = "major-upgrade" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .metadata.labels = {"e2e":"major-upgrade"} |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.postgresVersion = 17 |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.users += [{"name":"postgres","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.users += [{"name":"major-upgrade","password":{"type":"AlphaNumeric"}}] |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.image = "perconalab/percona-postgresql-operator:main-ppg17-postgres" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.initContainer.image = "perconalab/percona-postgresql-operator:K8SPG-708-12" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.backups.pgbackrest.image = "perconalab/percona-postgresql-operator:main-pgbackrest17" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.proxy.pgBouncer.image = "perconalab/percona-postgresql-operator:main-pgbouncer17" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.pmm.image = "perconalab/pmm-client:dev-latest" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.pmm.secret = "major-upgrade-pmm-secret" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.pmm.customClusterName = "major-upgrade-pmm-custom-name" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.pmm.postgresParams = "--environment=dev-postgres"
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ' /mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy/cr.yaml
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + kubectl -n kuttl-test-mutual-maggot apply -f -
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + [[ -n '' ]]
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + case $test_name in
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + yq eval -i '
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.extensions.image = "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.extensions.imagePullPolicy = "Always" |
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | .spec.extensions.storage = {"type": "s3", "bucket": "pg-extensions", "region": "eu-central-1", "secret": {"name": "aws-s3-secret"}}
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | ' /tmp/kuttl/pg/major-upgrade/cr.yaml
logger.go:42: 16:17:00 | major-upgrade/1-create-cluster | + cat /tmp/kuttl/pg/major-upgrade/cr.yaml
logger.go:42: 16:17:01 | major-upgrade/1-create-cluster | perconapgcluster.pgv2.percona.com/major-upgrade created
logger.go:42: 16:18:35 | major-upgrade/1-create-cluster | test step completed 1-create-cluster
logger.go:42: 16:18:35 | major-upgrade/2-write-data | starting test step 2-write-data
logger.go:42: 16:18:35 | major-upgrade/2-write-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
run_psql_local \
'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' \
"postgres:$(get_psql_user_pass major-upgrade-pguser-postgres)@$(get_psql_user_host major-upgrade-pguser-postgres)"
run_psql_local \
'\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' \
"postgres:$(get_psql_user_pass major-upgrade-pguser-postgres)@$(get_psql_user_host major-upgrade-pguser-postgres)"]
logger.go:42: 16:18:35 | major-upgrade/2-write-data | + source ../../functions
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ realpath ../../..
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ pwd
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ test_name=major-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ GIT_BRANCH=PR-988
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export PG_VER=17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ PG_VER=17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ BUCKET=pg-operator-testing
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ export PGOV1_VER=14
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ PGOV1_VER=14
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ which gdate
logger.go:42: 16:18:35 | major-upgrade/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ which date
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ date=/usr/bin/date
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ which gsed
logger.go:42: 16:18:35 | major-upgrade/2-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++++ which sed
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ sed=/usr/bin/sed
logger.go:42: 16:18:35 | major-upgrade/2-write-data | +++ command -v oc
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ oc get projects
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ get_psql_user_pass major-upgrade-pguser-postgres
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:35 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.password | base64decode}}'
logger.go:42: 16:18:36 | major-upgrade/2-write-data | ++ get_psql_user_host major-upgrade-pguser-postgres
logger.go:42: 16:18:36 | major-upgrade/2-write-data | ++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:36 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.host | base64decode }}'
logger.go:42: 16:18:36 | major-upgrade/2-write-data | + run_psql_local 'CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:36 | major-upgrade/2-write-data | + local 'command=CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);'
logger.go:42: 16:18:36 | major-upgrade/2-write-data | + local uri=postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:36 | major-upgrade/2-write-data | + local driver=postgres
logger.go:42: 16:18:36 | major-upgrade/2-write-data | ++ get_client_pod
logger.go:42: 16:18:36 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 16:18:36 | major-upgrade/2-write-data | + kubectl -n kuttl-test-mutual-maggot exec pg-client-b7cfff86c-84f6t -- bash -c 'printf '\''CREATE DATABASE myapp; \c myapp \\\ CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc'\'''
logger.go:42: 16:18:38 | major-upgrade/2-write-data | ++ get_psql_user_pass major-upgrade-pguser-postgres
logger.go:42: 16:18:38 | major-upgrade/2-write-data | ++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:38 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.password | base64decode}}'
logger.go:42: 16:18:39 | major-upgrade/2-write-data | ++ get_psql_user_host major-upgrade-pguser-postgres
logger.go:42: 16:18:39 | major-upgrade/2-write-data | ++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:39 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.host | base64decode }}'
logger.go:42: 16:18:39 | major-upgrade/2-write-data | + run_psql_local '\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)' postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:39 | major-upgrade/2-write-data | + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)'
logger.go:42: 16:18:39 | major-upgrade/2-write-data | + local uri=postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:39 | major-upgrade/2-write-data | + local driver=postgres
logger.go:42: 16:18:39 | major-upgrade/2-write-data | ++ get_client_pod
logger.go:42: 16:18:39 | major-upgrade/2-write-data | ++ kubectl -n kuttl-test-mutual-maggot get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 16:18:39 | major-upgrade/2-write-data | + kubectl -n kuttl-test-mutual-maggot exec pg-client-b7cfff86c-84f6t -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100500)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc'\'''
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
> goroutine 13 [running]:
> runtime/debug.Stack()
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002f3c00, {0x184a055, 0x14})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002f3c00}, 0x0}, {0x184a055?, 0xc0006cff80?})
>     /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc000462a10, {0x1accd90, 0xc0002f2100}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc000388008?, {0x0, 0xc000462a10, {0x1accd90, 0xc0002f2100}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc000388008, {0x0, 0xc000462a10, {0x1accd90, 0xc0002f2100}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc000373208, 0xdd?)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0000aa270, 0xc00059cd00, {0xc0004dc408, 0x18})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0000aa270, 0xc00059cd00, {0xc0004dc408, 0x18})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc000283360, 0xc00059cd00, 0xc0000ffc20)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc00059cd00)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc00059cd00, 0xc0005823c0)
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 12
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 16:18:41 | major-upgrade/2-write-data | test step completed 2-write-data
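run_psql_local never connects from the harness host: it reads the password and service host out of the cluster's pguser secret and runs psql inside the pg-client pod. Stripped to its moving parts, as a sketch against the same secret and client deployment (it pipes the SQL over stdin with exec -i instead of the printf-in-bash construction the function itself uses, and the SELECT is only illustrative):

    NS=kuttl-test-mutual-maggot
    PASS=$(kubectl -n "$NS" get secret/major-upgrade-pguser-postgres --template='{{.data.password | base64decode}}')
    HOST=$(kubectl -n "$NS" get secret/major-upgrade-pguser-postgres --template='{{.data.host | base64decode}}')
    POD=$(kubectl -n "$NS" get pods --selector=name=pg-client -o jsonpath='{.items[].metadata.name}')
    echo 'SELECT 1;' | kubectl -n "$NS" exec -i "$POD" -- psql -v ON_ERROR_STOP=1 -t -q "postgres://postgres:${PASS}@${HOST}/myapp"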
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | starting test step 3-read-from-primary
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
data=$(run_psql_local '\c myapp \\\ SELECT * from myApp;' "postgres:$(get_psql_user_pass major-upgrade-pguser-postgres)@$(get_psql_user_host major-upgrade-pguser-postgres)")
kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"]
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | + source ../../functions
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ realpath ../../..
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ pwd
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++ test_name=major-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ GIT_BRANCH=PR-988
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export PG_VER=17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ PG_VER=17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ BUCKET=pg-operator-testing
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ export PGOV1_VER=14
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ PGOV1_VER=14
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ which gdate
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ which date
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ date=/usr/bin/date
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ which gsed
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++++ which sed
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ sed=/usr/bin/sed
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ command -v oc
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | ++ oc get projects
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ get_psql_user_pass major-upgrade-pguser-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.password | base64decode}}'
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ get_psql_user_host major-upgrade-pguser-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ local secret_name=major-upgrade-pguser-postgres
logger.go:42: 16:18:41 | major-upgrade/3-read-from-primary | +++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.host | base64decode }}'
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | ++ run_psql_local '\c myapp \\\ SELECT * from myApp;' postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | ++ local 'command=\c myapp \\\ SELECT * from myApp;'
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | ++ local uri=postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | ++ local driver=postgres
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | +++ get_client_pod
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | +++ kubectl -n kuttl-test-mutual-maggot get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 16:18:42 | major-upgrade/3-read-from-primary | ++ kubectl -n kuttl-test-mutual-maggot exec pg-client-b7cfff86c-84f6t -- bash -c 'printf '\''\c myapp \\\ SELECT * from myApp;\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc'\'''
logger.go:42: 16:18:44 | major-upgrade/3-read-from-primary | + data=' 100500'
logger.go:42: 16:18:44 | major-upgrade/3-read-from-primary | + kubectl create configmap -n kuttl-test-mutual-maggot 03-read-from-primary '--from-literal=data= 100500'
logger.go:42: 16:18:44 | major-upgrade/3-read-from-primary | configmap/03-read-from-primary created
logger.go:42: 16:18:44 | major-upgrade/3-read-from-primary | test step completed 3-read-from-primary
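The read check works by freezing the query result into a ConfigMap that the step's assert file can compare field-for-field; the leading whitespace in ' 100500' is psql's -t column padding and is carried into the ConfigMap as-is. The pattern in isolation, as a sketch reusing NS, PASS, HOST and POD from the previous snippet:

    data=$(echo 'SELECT * FROM myApp;' | kubectl -n "$NS" exec -i "$POD" -- psql -v ON_ERROR_STOP=1 -t -q "postgres://postgres:${PASS}@${HOST}/myapp")
    kubectl -n "$NS" create configmap 03-read-from-primary --from-literal=data="${data}"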
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | starting test step 20-13-to-14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
kubectl apply -n ${NAMESPACE} -f - <<-EOF
apiVersion: pgv2.percona.com/v2
kind: PerconaPGUpgrade
metadata:
  name: 13-to-14
spec:
  postgresClusterName: major-upgrade
  image: ${IMAGE_UPGRADE}
  fromPostgresVersion: 13
  toPostgresVersion: 14
  toPostgresImage: $(get_container_image "postgres" 14)
  toPgBouncerImage: $(get_container_image "pgbouncer" 14)
  toPgBackRestImage: $(get_container_image "pgbackrest" 14)
EOF]
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | + source ../../functions
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ realpath ../../..
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ pwd
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ test_name=major-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ GIT_BRANCH=PR-988
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export PG_VER=17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ PG_VER=17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export BUCKET=pg-operator-testing
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ BUCKET=pg-operator-testing
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export PGOV1_TAG=1.4.0
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ PGOV1_TAG=1.4.0
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ export PGOV1_VER=14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ PGOV1_VER=14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ which gdate
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ which date
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ date=/usr/bin/date
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ which gsed
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++++ which sed
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ sed=/usr/bin/sed
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | +++ command -v oc
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ oc get projects
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | + kubectl apply -n kuttl-test-mutual-maggot -f -
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ get_container_image postgres 14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local component=postgres
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local pgVersion=14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local operatorVersion=PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ [[ ! PR-988-1f8703f6e =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ operatorVersion=main
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ echo perconalab/percona-postgresql-operator:main-ppg14-postgres
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ get_container_image pgbouncer 14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local component=pgbouncer
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local pgVersion=14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local operatorVersion=PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ [[ ! PR-988-1f8703f6e =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ operatorVersion=main
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ echo perconalab/percona-postgresql-operator:main-ppg14-pgbouncer
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ get_container_image pgbackrest 14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local component=pgbackrest
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local pgVersion=14
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ local operatorVersion=PR-988-1f8703f6e
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ [[ ! PR-988-1f8703f6e =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ operatorVersion=main
logger.go:42: 16:18:44 | major-upgrade/20-13-to-14 | ++ echo perconalab/percona-postgresql-operator:main-ppg14-pgbackrest
logger.go:42: 16:18:45 | major-upgrade/20-13-to-14 | perconapgupgrade.pgv2.percona.com/13-to-14 created
16:18:54 | major-upgrade/20-13-to-14 | logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 49s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 0/4 Terminating 0 85s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Running 0 86s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-6c886fdbdc-hlrv4 2/2 Running 0 84s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-6c886fdbdc-pp7b9 2/2 Running 0 83s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-6c886fdbdc-z5dwc 2/2 Running 0 83s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 84s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m3s logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:18:54 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 49s logger.go:42: 16:19:00 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc stopping 1 0 2m logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 56s logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Terminating 0 93s logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 91s logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m10s logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:01 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 56s logger.go:42: 16:19:07 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m7s logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 63s logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 
Running 0 98s logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m17s logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:08 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 63s logger.go:42: 16:19:14 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m13s logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 4s logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 70s logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m24s logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 4s 4s logger.go:42: 16:19:15 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 70s logger.go:42: 16:19:21 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m20s logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 10s logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 76s logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m30s logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 10s 10s logger.go:42: 16:19:22 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 76s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:28 | 
major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m27s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 17s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 83s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m37s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 17s 17s logger.go:42: 16:19:28 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 83s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m34s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 24s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 90s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m44s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 24s 24s logger.go:42: 16:19:35 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 90s logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m41s logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 31s logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 97s logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m51s logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 31s 31s 
logger.go:42: 16:19:42 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 97s
logger.go:42: 16:19:48 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m48s
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 |
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:0/4 0 38s
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 104s
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 2m58s
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 |
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 38s 38s
logger.go:42: 16:19:49 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 104s
logger.go:42: 16:19:55 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 2m55s
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 |
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Init:1/4 0 45s
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 111s
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m5s
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 |
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 45s 45s
logger.go:42: 16:19:56 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 111s
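The harness command above re-runs every six to seven seconds, watching the operator walk the PerconaPGCluster through ready, stopping, and paused while the 13-to-14-pgdata job performs the actual pg_upgrade run; the commented-out for-loop in the step would additionally dump logs from the pods labeled postgres-operator.crunchydata.com/pgupgrade=13-to-14. A minimal sketch of the same wait expressed as a status poll rather than repeated table dumps, assuming only the kubectl output shown in this log (wait_cluster_status is a hypothetical helper, not part of e2e-tests/functions):

    # Hypothetical helper: poll the STATUS column of `kubectl get pg`
    # (ready/stopping/paused/initializing) until it reaches the wanted state.
    wait_cluster_status() {
        local cluster=$1 want=$2 timeout=${3:-300} waited=0
        while [ "$waited" -lt "$timeout" ]; do
            # STATUS is the third column: NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
            status=$(kubectl -n "${NAMESPACE}" get pg "$cluster" --no-headers | awk '{print $3}')
            [ "$status" = "$want" ] && return 0
            sleep 5
            waited=$((waited + 5))
        done
        echo "cluster $cluster did not reach status $want within ${timeout}s" >&2
        return 1
    }

For example, wait_cluster_status major-upgrade paused would return as soon as the cluster is fully stopped, which in this run happens at 16:19:08.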
logger.go:42: 16:20:02 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 3m1s
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 |
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 1/1 Running 0 52s
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 118s
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m12s
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 |
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 0/1 52s 52s
logger.go:42: 16:20:03 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 118s
logger.go:42: 16:20:09 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 3m8s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 |
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 ContainerCreating 0 0s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 ContainerCreating 0 0s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 58s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 2m4s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m18s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 |
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 0/1 1s 1s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 0/1 1s 1s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 59s
logger.go:42: 16:20:10 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 2m5s
logger.go:42: 16:20:16 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 3m15s
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 |
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 ContainerCreating 0 7s
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 ContainerCreating 0 7s
logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 |
pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 65s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 2m11s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m25s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 0/1 8s 8s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 0/1 8s 8s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 66s logger.go:42: 16:20:17 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 2m12s logger.go:42: 16:20:23 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc paused 0 0 3m22s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 14s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 14s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 72s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-qtz4-wrxwr 0/1 Completed 0 2m18s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m32s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 0/1 15s 15s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 15s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 73s logger.go:42: 16:20:24 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-qtz4 1/1 29s 2m19s logger.go:42: 16:20:30 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 0 3m29s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 21s logger.go:42: 16:20:31 | 
major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 21s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 79s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 3s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 0/2 ContainerCreating 0 1s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 0/2 ContainerCreating 0 1s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 0/2 ContainerCreating 0 1s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 0/2 Init:0/2 0 2s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m39s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 22s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 22s logger.go:42: 16:20:31 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 80s logger.go:42: 16:20:37 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 2 3m36s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 28s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 28s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 86s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 10s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 0/2 ContainerCreating 0 8s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 8s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 8s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 0/2 Init:0/2 0 9s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m46s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 29s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 29s logger.go:42: 16:20:38 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 87s logger.go:42: 16:20:44 | 
major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 2 3m43s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 35s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 35s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 93s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 17s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 0/2 ContainerCreating 0 15s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 15s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 15s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 16s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 3m53s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 36s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 36s logger.go:42: 16:20:45 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 94s logger.go:42: 16:20:51 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 3m50s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 42s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 42s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 100s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 24s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 22s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | 
pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 22s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 22s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 23s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 43s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 43s logger.go:42: 16:20:52 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 101s logger.go:42: 16:20:58 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 3m57s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 49s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 49s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 107s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 31s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 29s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 29s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 29s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 30s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m7s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 50s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 50s logger.go:42: 16:20:59 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 108s logger.go:42: 16:21:05 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade 
major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 4m4s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 56s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 56s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 114s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 Init:3/5 0 38s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 36s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 36s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 36s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 37s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m14s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 57s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 57s logger.go:42: 16:21:06 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 115s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 4m12s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 64s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 64s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m2s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 0/4 Init:0/5 0 1s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 0/4 Init:0/5 0 2s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 PodInitializing 0 46s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 44s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 44s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 44s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 45s logger.go:42: 16:21:13 | 
major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m22s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 64s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 64s logger.go:42: 16:21:13 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m2s logger.go:42: 16:21:20 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 4m19s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 71s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 71s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m9s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-5rsx-695m9 0/1 PodInitializing 0 6s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 0/4 Init:0/5 0 8s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 0/4 Init:0/5 0 9s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 0/4 PodInitializing 0 53s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 51s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 51s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 51s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 52s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m29s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 72s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 72s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m10s logger.go:42: 16:21:21 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-5rsx 0/1 7s 8s logger.go:42: 16:21:27 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS 
POSTGRES PGBOUNCER AGE logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc initializing 0 3 4m27s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 79s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 79s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m17s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-5rsx-695m9 1/1 Running 0 14s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 0/4 PodInitializing 0 16s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 3/4 Running 0 17s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Running 0 61s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-patroni-version-check 1/1 Running 0 6s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 59s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 59s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 59s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 60s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m37s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 79s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 79s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m17s logger.go:42: 16:21:28 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-5rsx 0/1 14s 15s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc ready 3 3 4m34s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 86s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 86s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m24s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-5rsx-695m9 1/1 Running 0 21s logger.go:42: 16:21:35 | 
major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 4/4 Running 0 23s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 4/4 Running 0 24s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Running 0 68s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 66s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 66s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 66s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 67s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m44s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 86s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 86s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m24s logger.go:42: 16:21:35 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-5rsx 0/1 21s 22s logger.go:42: 16:21:48 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5] logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc ready 3 3 4m48s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 100s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 100s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m38s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-5rsx-695m9 0/1 Completed 0 35s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 4/4 Running 0 37s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 4/4 Running 0 38s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Running 0 82s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 80s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 80s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 80s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 81s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 4m58s logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | 
NAME COMPLETIONS DURATION AGE
logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 100s
logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 100s
logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m38s
logger.go:42: 16:21:49 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-5rsx 0/1 35s 36s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | running command: [sh -c kubectl -n ${NAMESPACE} get pg,pod,job # # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/pgupgrade=13-to-14 | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # sleep 5]
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | NAME ENDPOINT STATUS POSTGRES PGBOUNCER AGE
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | perconapgcluster.pgv2.percona.com/major-upgrade major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc ready 3 3 4m55s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 |
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 107s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 107s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/13-to-14-pgdata-p7h2d 0/1 Completed 0 2m45s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-backup-5rsx-695m9 0/1 Completed 0 42s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-jjsw-0 4/4 Running 0 44s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-l2dk-0 4/4 Running 0 45s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-instance1-qt55-0 4/4 Running 0 89s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 87s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 87s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 87s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/major-upgrade-repo-host-0 2/2 Running 0 88s
logger.go:42: 16:21:56 | major-upgrade/20-13-to-14 | pod/pg-client-b7cfff86c-84f6t 1/1 Running 0 5m5s
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 |
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 | NAME COMPLETIONS DURATION AGE
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-jjsw 1/1 16s 107s
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 | job.batch/13-to-14-major-upgrade-instance1-l2dk 1/1 14s 107s
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 | job.batch/13-to-14-pgdata 1/1 58s 2m45s
logger.go:42: 16:21:57 | major-upgrade/20-13-to-14 | job.batch/major-upgrade-backup-5rsx 1/1 36s 43s
logger.go:42: 16:22:02 | major-upgrade/20-13-to-14 | test step completed 20-13-to-14
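With the cluster back to ready on PostgreSQL 14, step 20 is done. The images it fed into the 13-to-14 PerconaPGUpgrade at 16:18:44 were rendered by get_container_image: because VERSION=PR-988-1f8703f6e does not match the release-style ^[0-9]+\.[0-9]+\.[0-9]+$ pattern, the helper pins the tag to main, yielding main-ppg14-postgres, main-ppg14-pgbouncer, and main-ppg14-pgbackrest. A sketch of the helper as inferred from that xtrace alone; the real definition lives in e2e-tests/functions and may differ:

    # Inferred from the xtrace above; the actual helper may differ.
    get_container_image() {
        local component=$1        # postgres | pgbouncer | pgbackrest
        local pgVersion=$2        # 14, the upgrade target in this test
        local operatorVersion=${VERSION}
        # Non-release builds (e.g. PR-988-1f8703f6e) fall back to the main tag.
        if [[ ! ${operatorVersion} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            operatorVersion=main
        fi
        echo "${IMAGE_BASE}:${operatorVersion}-ppg${pgVersion}-${component}"
    }

    # get_container_image postgres 14
    #   -> perconalab/percona-postgresql-operator:main-ppg14-postgres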
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | starting test step 21-wait-and-write-data
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions sleep 90 # wait some time for stanza to upgrade for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do echo "Checking the status of ${pod}:" kubectl -n ${NAMESPACE} exec -it ${pod} -- psql -c "SELECT version()" kubectl -n ${NAMESPACE} exec -it ${pod} -- psql -c "SELECT timeline_id FROM pg_control_checkpoint()" kubectl -n ${NAMESPACE} exec -it ${pod} -- pgbackrest --stanza=db --log-level-console=detail check kubectl -n ${NAMESPACE} exec -it ${pod} -- ls -l /pgdata/pg14_wal done run_psql_local \ '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' \ "postgres:$(get_psql_user_pass major-upgrade-pguser-postgres)@$(get_psql_user_host major-upgrade-pguser-postgres)"]
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | + source ../../functions
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ realpath ../../..
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++ CERT_MANAGER_VER=1.17.1
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ pwd
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++ test_name=major-upgrade
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export GIT_BRANCH=PR-988
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ GIT_BRANCH=PR-988
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export VERSION=PR-988-1f8703f6e
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ VERSION=PR-988-1f8703f6e
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_BASE=perconalab/percona-postgresql-operator
logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++
export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export PG_VER=17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ PG_VER=17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export BUCKET=pg-operator-testing logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ BUCKET=pg-operator-testing logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export PGOV1_TAG=1.4.0 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ PGOV1_TAG=1.4.0 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ export PGOV1_VER=14 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ PGOV1_VER=14 logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ which gdate logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ which date logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ date=/usr/bin/date logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ which gsed logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++++ which sed logger.go:42: 16:22:02 | 
major-upgrade/21-wait-and-write-data | +++ sed=/usr/bin/sed logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | +++ command -v oc logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | ++ oc get projects logger.go:42: 16:22:02 | major-upgrade/21-wait-and-write-data | + sleep 90 logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | ++ kubectl -n kuttl-test-mutual-maggot get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | ++ awk '{print $1}' logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | + echo 'Checking the status of major-upgrade-instance1-jjsw-0:' logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | Checking the status of major-upgrade-instance1-jjsw-0: logger.go:42: 16:23:32 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-jjsw-0 -- psql -c 'SELECT version()' logger.go:42: 16:23:33 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:33 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | version logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | ---------------------------------------------------------------------------------------------------------------------------------- logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:34 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-jjsw-0 -- psql -c 'SELECT timeline_id FROM pg_control_checkpoint()' logger.go:42: 16:23:35 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:35 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | timeline_id logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | ------------- logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | 2 logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-jjsw-0 -- pgbackrest --stanza=db --log-level-console=detail check logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, 
pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:36 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:37.993 P00 INFO: check command begin 2.54.2: --exec-id=365-cc637047 --log-level-console=detail --log-path=/pgdata/pgbackrest/log --pg1-path=/pgdata/pg14 --pg1-port=5432 --pg1-socket-path=/tmp/postgres --repo1-host=major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local. --repo1-host-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --repo1-host-cert-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt --repo1-host-key-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key --repo1-host-type=tls --repo1-host-user=postgres --repo1-path=/pgbackrest/repo1 --stanza=db logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:38.004 P00 INFO: check repo1 (standby) logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:38.015 P00 INFO: switch wal not performed because this is a standby logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:38.016 P00 DETAIL: statistics: {"socket.client":{"total":1},"socket.session":{"total":1},"tls.client":{"total":1},"tls.session":{"total":1}} logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:38.016 P00 INFO: check command end: completed successfully (26ms) logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-jjsw-0 -- ls -l /pgdata/pg14_wal logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:38 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | total 65544 logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | -rw-r----- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000D logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | -rw-r----- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000E logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000F logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:22 000000020000000000000010 logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 41 Apr 11 16:21 00000002.history logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | drwx------ 2 postgres postgres 4096 Apr 11 16:22 archive_status logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | + echo 'Checking the status of major-upgrade-instance1-l2dk-0:' logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | 
Checking the status of major-upgrade-instance1-l2dk-0: logger.go:42: 16:23:39 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-l2dk-0 -- psql -c 'SELECT version()' logger.go:42: 16:23:40 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:40 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | version logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | ---------------------------------------------------------------------------------------------------------------------------------- logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:41 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-l2dk-0 -- psql -c 'SELECT timeline_id FROM pg_control_checkpoint()' logger.go:42: 16:23:42 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:42 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | timeline_id logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | ------------- logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | 2 logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:43 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-l2dk-0 -- pgbackrest --stanza=db --log-level-console=detail check logger.go:42: 16:23:44 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:44 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:45.310 P00 INFO: check command begin 2.54.2: --exec-id=419-9e585a74 --log-level-console=detail --log-path=/pgdata/pgbackrest/log --pg1-path=/pgdata/pg14 --pg1-port=5432 --pg1-socket-path=/tmp/postgres --repo1-host=major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local. 
--repo1-host-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --repo1-host-cert-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt --repo1-host-key-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key --repo1-host-type=tls --repo1-host-user=postgres --repo1-path=/pgbackrest/repo1 --stanza=db logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:45.320 P00 INFO: check repo1 (standby) logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:45.332 P00 INFO: switch wal not performed because this is a standby logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:45.333 P00 DETAIL: statistics: {"socket.client":{"total":1},"socket.session":{"total":1},"tls.client":{"total":1},"tls.session":{"total":1}} logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:45.333 P00 INFO: check command end: completed successfully (26ms) logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-l2dk-0 -- ls -l /pgdata/pg14_wal logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:45 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | total 81928 logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw-r----- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000C logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw-r----- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000D logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000E logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000F logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:22 000000020000000000000010 logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 41 Apr 11 16:21 00000002.history logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | drwx------ 2 postgres postgres 4096 Apr 11 16:22 archive_status logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | + echo 'Checking the status of major-upgrade-instance1-qt55-0:' logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | Checking the status of major-upgrade-instance1-qt55-0: logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-qt55-0 -- psql -c 'SELECT version()' logger.go:42: 16:23:47 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:47 
| major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | version logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | ---------------------------------------------------------------------------------------------------------------------------------- logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:48 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-qt55-0 -- psql -c 'SELECT timeline_id FROM pg_control_checkpoint()' logger.go:42: 16:23:49 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:49 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | timeline_id logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | ------------- logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | 2 logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | (1 row) logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | logger.go:42: 16:23:50 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-qt55-0 -- pgbackrest --stanza=db --log-level-console=detail check logger.go:42: 16:23:51 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:51 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:52 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:52.617 P00 INFO: check command begin 2.54.2: --exec-id=760-74f0368e --log-level-console=detail --log-path=/pgdata/pgbackrest/log --pg1-path=/pgdata/pg14 --pg1-port=5432 --pg1-socket-path=/tmp/postgres --repo1-host=major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local. 
--repo1-host-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --repo1-host-cert-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt --repo1-host-key-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key --repo1-host-type=tls --repo1-host-user=postgres --repo1-path=/pgbackrest/repo1 --stanza=db logger.go:42: 16:23:52 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:52.626 P00 INFO: check repo1 configuration (primary) logger.go:42: 16:23:52 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:52.755 P00 INFO: check repo1 archive for WAL (primary) logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:53.059 P00 INFO: WAL segment 000000020000000000000011 successfully archived to '/pgbackrest/repo1/archive/db/14-2/0000000200000000/000000020000000000000011-1be4184c03ddac346c1d69e21ba4cc2c97451b17.gz' on repo1 logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:53.060 P00 DETAIL: statistics: {"socket.client":{"total":1},"socket.session":{"total":1},"tls.client":{"total":1},"tls.session":{"total":1}} logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | 2025-04-11 16:23:53.060 P00 INFO: check command end: completed successfully (445ms) logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec -it major-upgrade-instance1-qt55-0 -- ls -l /pgdata/pg14_wal logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:53 | major-upgrade/21-wait-and-write-data | Unable to use a TTY - input is not a terminal or the right kind of file logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | total 131084 logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:20 00000001000000000000000B.partial logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000B logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000C logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000D logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000E logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:21 00000002000000000000000F logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 371 Apr 11 16:21 00000002000000000000000F.00000028.backup logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:22 000000020000000000000010 logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 16777216 Apr 11 16:23 000000020000000000000011 logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | -rw------- 1 postgres postgres 41 Apr 11 16:21 00000002.history logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | drwx------ 2 postgres postgres 4096 Apr 11 16:23 archive_status logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | 
++ get_psql_user_pass major-upgrade-pguser-postgres logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | ++ local secret_name=major-upgrade-pguser-postgres logger.go:42: 16:23:54 | major-upgrade/21-wait-and-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.password | base64decode}}' logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | ++ get_psql_user_host major-upgrade-pguser-postgres logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | ++ local secret_name=major-upgrade-pguser-postgres logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | ++ kubectl -n kuttl-test-mutual-maggot get secret/major-upgrade-pguser-postgres '--template={{.data.host | base64decode }}' logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | + run_psql_local '\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | + local 'command=\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)' logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | + local uri=postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | + local driver=postgres logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | ++ get_client_pod logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | ++ kubectl -n kuttl-test-mutual-maggot get pods --selector=name=pg-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 16:23:55 | major-upgrade/21-wait-and-write-data | + kubectl -n kuttl-test-mutual-maggot exec pg-client-b7cfff86c-84f6t -- bash -c 'printf '\''\c myapp \\\ INSERT INTO myApp (id) VALUES (100501)\n'\'' | psql -v ON_ERROR_STOP=1 -t -q postgres://'\''postgres:UDpkqYAga7xsSGEQ9CtkwrvG@major-upgrade-primary.kuttl-test-mutual-maggot.svc'\''' logger.go:42: 16:23:57 | major-upgrade/21-wait-and-write-data | test step completed 21-wait-and-write-data logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | starting test step 23-recreate-stanza logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | running command: [sh -c set -o errexit set -o xtrace source ../../functions for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do kubectl -n ${NAMESPACE} exec ${pod} -- touch /pgdata/sleep-forever kubectl -n ${NAMESPACE} exec ${pod} -- pgbackrest --stanza=db --log-level-console=debug stop done pod=$(kubectl get -n ${NAMESPACE} pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | tail -n 1 | awk '{print $1}') kubectl -n ${NAMESPACE} exec ${pod} -- patronictl pause for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do kubectl -n ${NAMESPACE} exec ${pod} -- pg_ctl -D /pgdata/pg14 stop done kubectl -n ${NAMESPACE} exec ${pod} -- pgbackrest --stanza=db --log-level-console=debug stanza-delete for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do kubectl -n ${NAMESPACE} exec ${pod} -- pgbackrest --stanza=db --log-level-console=debug start kubectl -n ${NAMESPACE} exec ${pod} -- rm /pgdata/sleep-forever done kubectl -n ${NAMESPACE} exec ${pod} -- patronictl resume # give some time to patroni for starting postgres sleep 90 kubectl -n 
${NAMESPACE} exec ${pod} -- pgbackrest --stanza=db --log-level-console=debug stanza-create] logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | + source ../../functions logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ realpath ../../.. logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ CERT_MANAGER_VER=1.17.1 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ pwd logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ basename /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/tests/major-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ test_name=major-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ source /mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/vars.sh logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-pg-operator_PR-988 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/deploy logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-pg-operator_PR-988/e2e-tests/conf logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export TEMP_DIR=/tmp/kuttl/pg/major-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ TEMP_DIR=/tmp/kuttl/pg/major-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export GIT_BRANCH=PR-988 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ GIT_BRANCH=PR-988 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export VERSION=PR-988-1f8703f6e logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ VERSION=PR-988-1f8703f6e logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_BASE=perconalab/percona-postgresql-operator logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE=perconalab/percona-postgresql-operator:PR-988-1f8703f6e logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export PG_VER=17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ PG_VER=17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ 
IMAGE_PGBOUNCER=perconalab/percona-postgresql-operator:main-pgbouncer17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_POSTGRESQL=perconalab/percona-postgresql-operator:main-ppg17-postgres logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_BACKREST=perconalab/percona-postgresql-operator:main-pgbackrest17 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_UPGRADE=perconalab/percona-postgresql-operator:main-upgrade logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export BUCKET=pg-operator-testing logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ BUCKET=pg-operator-testing logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export PGOV1_TAG=1.4.0 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ PGOV1_TAG=1.4.0 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ export PGOV1_VER=14 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ PGOV1_VER=14 logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ which gdate logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | which: no gdate in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ which date logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ date=/usr/bin/date logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ which gsed logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | which: no gsed in (/mnt/jenkins/workspace/cloud-pg-operator_PR-988/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++++ which sed logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ sed=/usr/bin/sed logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | +++ command -v oc logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ oc get projects logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ kubectl -n kuttl-test-mutual-maggot get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers logger.go:42: 16:23:57 | major-upgrade/23-recreate-stanza | ++ awk '{print $1}' logger.go:42: 16:23:58 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:23:58 | 
major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-jjsw-0 -- touch /pgdata/sleep-forever logger.go:42: 16:23:58 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:23:59 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-jjsw-0 -- pgbackrest --stanza=db --log-level-console=debug stop logger.go:42: 16:24:00 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.563 P00 INFO: stop command begin 2.54.2: --exec-id=421-adcff8c3 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"421-adcff8c3"}) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: command/control/stop::cmdStop: (void) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: 
storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: true} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storagePathCreate: (this: {type: posix, path: /, write: true}, pathExp: {"/tmp/pgbackrest"}, param.errorOnExists: false, param.noParentCreate: false, param.mode: 0770) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storagePathCreate: => void logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.modeFile: 0000, param.modePath: 0770, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: true, param.noTruncate: true, param.compressible: false) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.564 P00 DEBUG: storage/storage::storageNewWrite: => {type: posix, name: /tmp/pgbackrest/db.stop, modeFile: 0640, modePath: 0770, createPath: true, syncFile: true, syncPath: true, atomic: false} logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.565 P00 DEBUG: command/control/stop::cmdStop: => void logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.565 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.565 P00 INFO: stop command end: completed successfully (4ms) logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.566 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:01.566 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:01 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-l2dk-0 -- touch /pgdata/sleep-forever logger.go:42: 16:24:02 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:03 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-l2dk-0 -- pgbackrest --stanza=db --log-level-console=debug stop logger.go:42: 16:24:03 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: 
database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 INFO: stop command begin 2.54.2: --exec-id=472-ffb5ec37 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"472-ffb5ec37"}) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: command/control/stop::cmdStop: (void) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, 
write: true} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storagePathCreate: (this: {type: posix, path: /, write: true}, pathExp: {"/tmp/pgbackrest"}, param.errorOnExists: false, param.noParentCreate: false, param.mode: 0770) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storagePathCreate: => void logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.modeFile: 0000, param.modePath: 0770, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: true, param.noTruncate: true, param.compressible: false) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.032 P00 DEBUG: storage/storage::storageNewWrite: => {type: posix, name: /tmp/pgbackrest/db.stop, modeFile: 0640, modePath: 0770, createPath: true, syncFile: true, syncPath: true, atomic: false} logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.033 P00 DEBUG: command/control/stop::cmdStop: => void logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.034 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.034 P00 INFO: stop command end: completed successfully (5ms) logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.034 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:05.034 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- touch /pgdata/sleep-forever logger.go:42: 16:24:05 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:06 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- pgbackrest --stanza=db --log-level-console=debug stop logger.go:42: 16:24:07 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 INFO: stop command begin 2.54.2: --exec-id=850-e9cb8861 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"850-e9cb8861"}) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, 
param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: command/control/stop::cmdStop: (void) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: true} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storagePathCreate: (this: {type: posix, path: /, write: true}, pathExp: {"/tmp/pgbackrest"}, param.errorOnExists: false, param.noParentCreate: false, param.mode: 0770) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storagePathCreate: => void logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.modeFile: 0000, param.modePath: 0770, param.user: null, param.group: null, param.timeModified: 0, 
param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: true, param.noTruncate: true, param.compressible: false) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.366 P00 DEBUG: storage/storage::storageNewWrite: => {type: posix, name: /tmp/pgbackrest/db.stop, modeFile: 0640, modePath: 0770, createPath: true, syncFile: true, syncPath: true, atomic: false} logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.368 P00 DEBUG: command/control/stop::cmdStop: => void logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.368 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.368 P00 INFO: stop command end: completed successfully (5ms) logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.368 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:08.368 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | ++ kubectl get -n kuttl-test-mutual-maggot pods -l postgres-operator.crunchydata.com/data=postgres --no-headers logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | ++ tail -n 1 logger.go:42: 16:24:08 | major-upgrade/23-recreate-stanza | ++ awk '{print $1}' logger.go:42: 16:24:09 | major-upgrade/23-recreate-stanza | + pod=major-upgrade-instance1-qt55-0 logger.go:42: 16:24:09 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- patronictl pause logger.go:42: 16:24:09 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:11 | major-upgrade/23-recreate-stanza | Success: cluster management is paused logger.go:42: 16:24:11 | major-upgrade/23-recreate-stanza | ++ kubectl -n kuttl-test-mutual-maggot get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers logger.go:42: 16:24:11 | major-upgrade/23-recreate-stanza | ++ awk '{print $1}' logger.go:42: 16:24:11 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:11 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-jjsw-0 -- pg_ctl -D /pgdata/pg14 stop logger.go:42: 16:24:12 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:13 | major-upgrade/23-recreate-stanza | waiting for server to shut down.... 
done logger.go:42: 16:24:13 | major-upgrade/23-recreate-stanza | server stopped logger.go:42: 16:24:13 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:13 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-l2dk-0 -- pg_ctl -D /pgdata/pg14 stop logger.go:42: 16:24:14 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:15 | major-upgrade/23-recreate-stanza | waiting for server to shut down.... done logger.go:42: 16:24:15 | major-upgrade/23-recreate-stanza | server stopped logger.go:42: 16:24:15 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:15 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- pg_ctl -D /pgdata/pg14 stop logger.go:42: 16:24:16 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:17 | major-upgrade/23-recreate-stanza | waiting for server to shut down.... done logger.go:42: 16:24:17 | major-upgrade/23-recreate-stanza | server stopped logger.go:42: 16:24:17 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- pgbackrest --stanza=db --log-level-console=debug stanza-delete logger.go:42: 16:24:17 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.964 P00 DEBUG: common/io/socket/common::sckInit: (block: false, keepAlive: true, tcpKeepAliveCount: 0, tcpKeepAliveIdle: 0, tcpKeepAliveInterval: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.964 P00 DEBUG: common/io/socket/common::sckInit: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 INFO: stanza-delete command begin 2.54.2: --exec-id=902-16a04568 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --pg1-path=/pgdata/pg14 --pg1-port=5432 --pg1-socket-path=/tmp/postgres --repo1-host=major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local. 
--repo1-host-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --repo1-host-cert-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt --repo1-host-key-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key --repo1-host-type=tls --repo1-host-user=postgres --repo1-path=/pgbackrest/repo1 --stanza=db logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"902-16a04568"}) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: command/lock::cmdLockAcquire: (param.returnOnNoLock: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: command/lock::cmdLockAcquire: => true logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: command/stanza/delete::cmdStanzaDelete: (void) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: protocol/helper::repoIsLocal: (repoIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: protocol/helper::repoIsLocal: => false logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: protocol/helper::protocolRemoteGet: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: common/io/socket/client::sckClientNew: (host: {"major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local."}, port: 8432, timeoutConnect: 60000, timeoutSession: 1860000) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: common/io/socket/client::sckClientNew: => {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.966 P00 DEBUG: common/io/tls/client::tlsClientNew: (ioClient: {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}}, host: 
{"major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local."}, timeoutConnect: 60000, timeoutSession: 1860000, verifyPeer: true, param.caFile: {"/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt"}, param.caPath: null, param.certFile: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt"}, param.keyFile: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key"}) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: false, param.pathExpressionFunction: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: false, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: false} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: false} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key"}, param.level: 0, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.968 P00 DEBUG: common/io/tls/client::tlsClientNew: => {type: tls, driver: {ioClient: {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}}, timeoutConnect: 60000, timeoutSession: 1860000, verifyPeer: true}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.977 P00 DEBUG: protocol/helper::protocolRemoteParam: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.977 P00 DEBUG: protocol/helper::protocolRemoteParam: => {["--exec-id=902-16a04568", "--lock=db-archive-1.lock", "--lock=db-backup-1.lock", "--log-level-console=off", "--log-level-file=off", "--log-level-stderr=error", "--pg1-path=/pgdata/pg14", "--process=0", "--remote-type=repo", "--repo=1", "--repo1-path=/pgbackrest/repo1", "--stanza=db", "stanza-delete:remote"]} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.981 P00 DEBUG: protocol/helper::protocolRemoteGet: => {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.981 P00 DEBUG: storage/remote/storage::storageRemoteNew: (modeFile: 0640, modePath: 0750, write: false, targetTime: 0, pathExpressionFunction: (function *), client: {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle}, compressLevel: 3) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.982 P00 
DEBUG: storage/remote/storage::storageRemoteNew: => {type: remote, path: /pgbackrest/repo1, write: false} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.982 P00 DEBUG: storage/storage::storageList: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.errorOnMissing: false, param.nullOnMissing: true, param.expression: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.982 P00 DEBUG: storage/storage::storageNewItr: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.level: 1, param.errorOnMissing: false, param.recurse: false, param.nullOnMissing: true, param.sortOrder: 0, param.expression: null, param.recurse: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.982 P00 DEBUG: storage/iterator::storageItrNew: (driver: *void, path: {"/pgbackrest/repo1/archive/db"}, level: 1, errorOnMissing: false, nullOnMissing: true, recurse: false, sortOrder: 0, targetTime: 0, expression: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/iterator::storageItrNew: => {stack: {size: 1}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/storage::storageNewItr: => {stack: {size: 1}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/storage::storageList: => {["archive.info", "archive.info.copy", "13-1", "14-2"]} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/storage::storageList: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.errorOnMissing: false, param.nullOnMissing: true, param.expression: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/storage::storageNewItr: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.level: 1, param.errorOnMissing: false, param.recurse: false, param.nullOnMissing: true, param.sortOrder: 0, param.expression: null, param.recurse: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.983 P00 DEBUG: storage/iterator::storageItrNew: (driver: *void, path: {"/pgbackrest/repo1/backup/db"}, level: 1, errorOnMissing: false, nullOnMissing: true, recurse: false, sortOrder: 0, targetTime: 0, expression: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/iterator::storageItrNew: => {stack: {size: 1}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageNewItr: => {stack: {size: 1}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageList: => {["backup.history", "backup.info.copy", "backup.info", "20250411-161808F", "latest", "20250411-162129F"]} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) 
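The directory listings above show why this step recreates the stanza: /pgbackrest/repo1/archive/db still holds history for two PostgreSQL versions (13-1 and 14-2) left over from the major upgrade, so the old metadata no longer matches the upgraded cluster. stanza-delete only proceeds because the stanza was stopped beforehand — the /tmp/pgbackrest/db.stop check just below returns true, and the postmaster.pid check confirms postgres is down. A minimal sketch of the preconditions being verified, assuming this cluster's paths (the checks belong to pgbackrest itself; this is not its source):

    # stanza-delete refuses to run unless the stanza is stopped and postgres is shut down
    [ -f /tmp/pgbackrest/db.stop ] || echo "stanza not stopped: run 'pgbackrest --stanza=db stop' first"
    [ ! -f /pgdata/pg14/postmaster.pid ] || echo "postgres still running: 'pg_ctl -D /pgdata/pg14 stop' first"
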
logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageExists: => true logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::pgIsLocal: (pgIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::pgIsLocal: => true logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/pgdata/pg14"}, param.modeFile: 0000, param.modePath: 0000, param.write: false, param.pathExpressionFunction: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/pgdata/pg14"}, modeFile: 0640, modePath: 0750, write: false, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /pgdata/pg14, write: false} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /pgdata/pg14, write: false} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /pgdata/pg14, write: false}, pathExp: {"postmaster.pid"}, param.timeout: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /pgdata/pg14, write: false}, fileExp: {"postmaster.pid"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::repoIsLocal: (repoIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::repoIsLocal: => false logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::protocolRemoteGet: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: protocol/helper::protocolRemoteGet: => {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.984 P00 DEBUG: storage/remote/storage::storageRemoteNew: (modeFile: 0640, modePath: 0750, write: true, targetTime: 0, pathExpressionFunction: (function *), client: {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle}, compressLevel: 3) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.985 P00 DEBUG: storage/remote/storage::storageRemoteNew: => {type: remote, path: 
/pgbackrest/repo1, write: true} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.985 P00 DEBUG: storage/storage::storageRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/archive.info"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.985 P00 DEBUG: storage/remote/storage::storageRemoteRemove: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.986 P00 DEBUG: storage/remote/storage::storageRemoteRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.986 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.986 P00 DEBUG: storage/storage::storageRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/archive.info.copy"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.986 P00 DEBUG: storage/remote/storage::storageRemoteRemove: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info.copy"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.987 P00 DEBUG: storage/remote/storage::storageRemoteRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.987 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.987 P00 DEBUG: storage/storage::storageRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/backup.info"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.987 P00 DEBUG: storage/remote/storage::storageRemoteRemove: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.988 P00 DEBUG: storage/remote/storage::storageRemoteRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.988 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.988 P00 DEBUG: storage/storage::storageRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/backup.info.copy"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.988 P00 DEBUG: storage/remote/storage::storageRemoteRemove: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info.copy"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.989 P00 DEBUG: storage/remote/storage::storageRemoteRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.989 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.989 P00 DEBUG: storage/storage::storagePathRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, pathExp: {""}, param.errorOnMissing: false, param.recurse: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.989 P00 DEBUG: storage/remote/storage::storageRemotePathRemove: (this: 
{StorageRemote}, path: {"/pgbackrest/repo1/archive/db"}, recurse: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.991 P00 DEBUG: storage/remote/storage::storageRemotePathRemove: => true logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.991 P00 DEBUG: storage/storage::storagePathRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.991 P00 DEBUG: storage/storage::storagePathRemove: (this: {type: remote, path: /pgbackrest/repo1, write: true}, pathExp: {""}, param.errorOnMissing: false, param.recurse: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:18.991 P00 DEBUG: storage/remote/storage::storageRemotePathRemove: (this: {StorageRemote}, path: {"/pgbackrest/repo1/backup/db"}, recurse: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/remote/storage::storageRemotePathRemove: => true logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/storage::storagePathRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: true} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.071 P00 DEBUG: storage/storage::storageRemove: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.errorOnMissing: false) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DEBUG: command/stanza/delete::cmdStanzaDelete: => void logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DETAIL: statistics: {"socket.client":{"total":1},"socket.session":{"total":1},"tls.client":{"total":1},"tls.session":{"total":1}} logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 INFO: stanza-delete command end: completed successfully (109ms) logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:19.072 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | ++ kubectl -n kuttl-test-mutual-maggot get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | ++ awk '{print $1}' logger.go:42: 16:24:19 
| major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:19 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-jjsw-0 -- pgbackrest --stanza=db --log-level-console=debug start logger.go:42: 16:24:20 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.338 P00 INFO: start command begin 2.54.2: --exec-id=466-c5061956 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"466-c5061956"}) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: command/control/start::cmdStart: (void) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageExists: => true logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: 
storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: true} logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageRemove: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.errorOnMissing: false) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: command/control/start::cmdStart: => void logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 INFO: start command end: completed successfully (4ms) logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:21.339 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-jjsw-0 -- rm /pgdata/sleep-forever logger.go:42: 16:24:21 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:23 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:23 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-l2dk-0 -- pgbackrest --stanza=db --log-level-console=debug start logger.go:42: 16:24:23 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 INFO: start command begin 2.54.2: --exec-id=515-ebfa3826 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"515-ebfa3826"}) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, 
modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: command/control/start::cmdStart: (void) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageExists: => true logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: true} logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: true} logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageRemove: (this: {type: posix, path: /, write: true}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.errorOnMissing: false) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: storage/storage::storageRemove: => void logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: command/control/start::cmdStart: => void logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 INFO: start command end: completed successfully (3ms) logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:24.628 
P00 DEBUG: main::main: => 0 logger.go:42: 16:24:24 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-l2dk-0 -- rm /pgdata/sleep-forever logger.go:42: 16:24:25 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:26 | major-upgrade/23-recreate-stanza | + for pod in '$(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '\''{print $1}'\'')' logger.go:42: 16:24:26 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- pgbackrest --stanza=db --log-level-console=debug start logger.go:42: 16:24:26 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 INFO: start command begin 2.54.2: --exec-id=924-f28320b0 --log-level-console=debug --log-path=/pgdata/pgbackrest/log --stanza=db logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"924-f28320b0"}) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: command/control/start::cmdStart: (void) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} 
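As the cmdStart entries for each pod show, pgbackrest start does little more than remove the stanza's stop file; when the file is already gone it only warns and still exits 0, which is exactly what happens for the third pod just below. A rough shell equivalent of the observed behaviour (a sketch, not pgbackrest source):

    # 'pgbackrest --stanza=db start' re-enables the stanza by deleting its stop file
    if [ -f /tmp/pgbackrest/db.stop ]; then
        rm /tmp/pgbackrest/db.stop
    else
        echo "WARN: stop file does not exist for stanza db"
    fi
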
logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 WARN: stop file does not exist for stanza db logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: command/control/start::cmdStart: => void logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 INFO: start command end: completed successfully (2ms) logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | 2025-04-11 16:24:28.196 P00 DEBUG: main::main: => 0 logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- rm /pgdata/sleep-forever logger.go:42: 16:24:28 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:29 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- patronictl resume logger.go:42: 16:24:30 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:24:32 | major-upgrade/23-recreate-stanza | Success: cluster management is resumed logger.go:42: 16:24:32 | major-upgrade/23-recreate-stanza | + sleep 90 logger.go:42: 16:26:02 | major-upgrade/23-recreate-stanza | + kubectl -n kuttl-test-mutual-maggot exec major-upgrade-instance1-qt55-0 -- pgbackrest --stanza=db --log-level-console=debug stanza-create logger.go:42: 16:26:02 | major-upgrade/23-recreate-stanza | Defaulted container "database" out of: database, replication-cert-copy, pgbackrest, pgbackrest-config, postgres-startup (init), extension-relocator-14 (init), extension-installer-14 (init), database-init (init), nss-wrapper-init (init) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.688 P00 DEBUG: common/io/socket/common::sckInit: (block: false, keepAlive: true, tcpKeepAliveCount: 0, tcpKeepAliveIdle: 0, tcpKeepAliveInterval: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.688 P00 DEBUG: common/io/socket/common::sckInit: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 INFO: stanza-create command begin 2.54.2: --exec-id=1293-a355e46c --log-level-console=debug --log-path=/pgdata/pgbackrest/log --pg1-path=/pgdata/pg14 --pg1-port=5432 --pg1-socket-path=/tmp/postgres --repo1-host=major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local. 
--repo1-host-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --repo1-host-cert-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt --repo1-host-key-file=/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key --repo1-host-type=tls --repo1-host-user=postgres --repo1-path=/pgbackrest/repo1 --stanza=db logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: common/lock::lockInit: (path: {"/tmp/pgbackrest"}, execId: {"1293-a355e46c"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/tmp/pgbackrest"}, param.modeFile: 0000, param.modePath: 0000, param.write: true, param.pathExpressionFunction: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/tmp/pgbackrest"}, modeFile: 0640, modePath: 0750, write: true, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /tmp/pgbackrest, write: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: common/lock::lockInit: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.690 P00 DEBUG: command/lock::cmdLockAcquire: (param.returnOnNoLock: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: command/lock::cmdLockAcquire: => true logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: config/load::cfgLoad: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: command/stanza/create::cmdStanzaCreate: (void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: command/control/common::lockStopTest: (void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/db.stop"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/db.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageExists: (this: {type: posix, path: /, write: false}, pathExp: {"/tmp/pgbackrest/all.stop"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/tmp/pgbackrest/all.stop"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, 
param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: command/control/common::lockStopTest: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: db/helper::dbGet: (primaryOnly: false, primaryRequired: true, standbyRequired: n) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: db/helper::dbGetIdx: (pgIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: protocol/helper::pgIsLocal: (pgIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: protocol/helper::pgIsLocal: => true logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: protocol/helper::pgIsLocal: (pgIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: protocol/helper::pgIsLocal: => true logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/pgdata/pg14"}, param.modeFile: 0000, param.modePath: 0000, param.write: false, param.pathExpressionFunction: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/pgdata/pg14"}, modeFile: 0640, modePath: 0750, write: false, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /pgdata/pg14, write: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /pgdata/pg14, write: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: postgres/client::pgClientNew: (host: {"/tmp/postgres"}, port: 5432, database: {"postgres"}, user: null, timeout: 1800000) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: postgres/client::pgClientNew: => {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: db/db::dbNew: (client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null, storage: {type: posix, path: /pgdata/pg14, write: false}, applicationName: {"pgBackRest [stanza-create]"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: db/db::dbNew: => {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: db/helper::dbGetIdx: => {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: 
db/db::dbOpen: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.691 P00 DEBUG: postgres/client::pgClientOpen: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.695 P00 DEBUG: postgres/client::pgClientOpen: => {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.695 P00 DEBUG: db/db::dbExec: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, command: {"set search_path = 'pg_catalog'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.695 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: none, query: {"set search_path = 'pg_catalog'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.695 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"set search_path = 'pg_catalog'"}, resultType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: postgres/client::pgClientQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbExec: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbExec: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, command: {"set client_encoding = 'UTF8'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: none, query: {"set client_encoding = 'UTF8'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"set client_encoding = 'UTF8'"}, resultType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: postgres/client::pgClientQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbExec: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: row, query: {"select (select setting from pg_catalog.pg_settings where name = 'server_version_num')::int4, (select setting from pg_catalog.pg_settings where name = 
'data_directory')::text, (select setting from pg_catalog.pg_settings where name = 'archive_mode')::text, (select setting from pg_catalog.pg_settings where name = 'archive_command')::text, (select setting from pg_catalog.pg_settings where name = 'checkpoint_timeout')::int4"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.696 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"select (select setting from pg_catalog.pg_settings where name = 'server_version_num')::int4, (select setting from pg_catalog.pg_settings where name = 'data_directory')::text, (select setting from pg_catalog.pg_settings where name = 'archive_mode')::text, (select setting from pg_catalog.pg_settings where name = 'archive_command')::text, (select setting from pg_catalog.pg_settings where name = 'checkpoint_timeout')::int4"}, resultType: row) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: => {Pack} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: => {Pack} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbExec: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, command: {"set application_name = 'pgBackRest [stanza-create]'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: none, query: {"set application_name = 'pgBackRest [stanza-create]'"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"set application_name = 'pgBackRest [stanza-create]'"}, resultType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbExec: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbExec: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, command: {"set max_parallel_workers_per_gather = 0"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: none, query: {"set max_parallel_workers_per_gather = 0"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"set max_parallel_workers_per_gather = 0"}, resultType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: 
postgres/client::pgClientQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: => null logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbExec: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbIsInRecovery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQueryColumn: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, query: {"select pg_catalog.pg_is_in_recovery()"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: (this: {client: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, remoteClient: null}, resultType: column, query: {"select pg_catalog.pg_is_in_recovery()"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: (this: {host: {"/tmp/postgres"}, database: {"postgres"}, user: null, port: 5432, queryTimeout 1800000}, query: {"select pg_catalog.pg_is_in_recovery()"}, resultType: column) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/client::pgClientQuery: => {Pack} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQuery: => {Pack} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbQueryColumn: => {depth: 0, idLast: 0, tagNextId: 0, tagNextType: 0, tagNextValue 0} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbIsInRecovery: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/interface::pgControlFromFile: (storage: {type: posix, path: /pgdata/pg14, write: false}, pgVersionForce: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/interface::pgControlBufferFromFile: (storage: {type: posix, path: /pgdata/pg14, write: false}, pgVersionForce: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: storage/storage::storageNewRead: (this: {type: posix, path: /pgdata/pg14, write: false}, fileExp: {"global/pg_control"}, param.ignoreMissing: false, param.compressible: false, param.offset: 0, param.limit: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: storage/storage::storageNewRead: => {type: posix, name: /pgdata/pg14/global/pg_control, ignoreMissing: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: storage/storage::storageGet: (file: {type: posix, name: /pgdata/pg14/global/pg_control, ignoreMissing: false}, param.exactSize: 512) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: storage/storage::storageGet: => {used: 512, size: 512} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/interface::pgControlBufferFromFile: => {used: 8192, size: 8192} logger.go:42: 16:26:03 | 
major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: postgres/interface::pgControlFromFile: => {version: 140000, systemId: 7492091127174393878, walSegmentSize: 16777216, pageChecksumVersion: 1} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/db::dbOpen: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: db/helper::dbGet: => struct logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 INFO: stanza-create for stanza 'db' on repo1 logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: protocol/helper::repoIsLocal: (repoIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: protocol/helper::repoIsLocal: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: protocol/helper::protocolRemoteGet: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: common/io/socket/client::sckClientNew: (host: {"major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local."}, port: 8432, timeoutConnect: 60000, timeoutSession: 1860000) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: common/io/socket/client::sckClientNew: => {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.702 P00 DEBUG: common/io/tls/client::tlsClientNew: (ioClient: {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}}, host: {"major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local."}, timeoutConnect: 60000, timeoutSession: 1860000, verifyPeer: true, param.caFile: {"/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt"}, param.caPath: null, param.certFile: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt"}, param.keyFile: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key"}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/posix/storage::storagePosixNew: (path: {"/"}, param.modeFile: 0000, param.modePath: 0000, param.write: false, param.pathExpressionFunction: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: (type: posix, path: {"/"}, modeFile: 0640, modePath: 0750, write: false, pathExpressionFunction: null, pathSync: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/posix/storage::storagePosixNewInternal: => {type: posix, path: /, write: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/posix/storage::storagePosixNew: => {type: posix, path: /, write: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/storage::storageInfo: (this: {type: posix, path: /, write: false}, fileExp: {"/etc/pgbackrest/conf.d/~postgres-operator/client-tls.key"}, param.level: 0, param.ignoreMissing: true, param.followLink: true, 
param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.705 P00 DEBUG: common/io/tls/client::tlsClientNew: => {type: tls, driver: {ioClient: {type: socket, driver: {host: major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local., port: 8432, timeoutConnect: 60000, timeoutSession: 1860000}}, timeoutConnect: 60000, timeoutSession: 1860000, verifyPeer: true}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.717 P00 DEBUG: protocol/helper::protocolRemoteParam: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.717 P00 DEBUG: protocol/helper::protocolRemoteParam: => {["--exec-id=1293-a355e46c", "--lock=db-archive-1.lock", "--lock=db-backup-1.lock", "--log-level-console=off", "--log-level-file=off", "--log-level-stderr=error", "--pg1-path=/pgdata/pg14", "--process=0", "--remote-type=repo", "--repo=1", "--repo1-path=/pgbackrest/repo1", "--stanza=db", "stanza-create:remote"]} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.720 P00 DEBUG: protocol/helper::protocolRemoteGet: => {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: storage/remote/storage::storageRemoteNew: (modeFile: 0640, modePath: 0750, write: false, targetTime: 0, pathExpressionFunction: (function *), client: {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle}, compressLevel: 3) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: storage/remote/storage::storageRemoteNew: => {type: remote, path: /pgbackrest/repo1, write: false} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: protocol/helper::repoIsLocal: (repoIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: protocol/helper::repoIsLocal: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: protocol/helper::protocolRemoteGet: (protocolStorageType: repo, hostIdx: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: protocol/helper::protocolRemoteGet: => {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.721 P00 DEBUG: storage/remote/storage::storageRemoteNew: (modeFile: 0640, modePath: 0750, write: true, targetTime: 0, pathExpressionFunction: (function *), client: {name: remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.', state: idle}, compressLevel: 3) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.722 P00 DEBUG: storage/remote/storage::storageRemoteNew: => {type: remote, path: /pgbackrest/repo1, write: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.722 P00 DEBUG: storage/storage::storageExists: (this: {type: remote, path: /pgbackrest/repo1, 
write: false}, pathExp: {"/archive.info"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.722 P00 DEBUG: storage/storage::storageInfo: (this: {type: remote, path: /pgbackrest/repo1, write: false}, fileExp: {"/archive.info"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.722 P00 DEBUG: storage/remote/storage::storageRemoteInfo: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info"}, level: 3, param.followLink: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/remote/storage::storageRemoteInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/storage::storageExists: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {"/archive.info.copy"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/storage::storageInfo: (this: {type: remote, path: /pgbackrest/repo1, write: false}, fileExp: {"/archive.info.copy"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.723 P00 DEBUG: storage/remote/storage::storageRemoteInfo: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info.copy"}, level: 3, param.followLink: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/remote/storage::storageRemoteInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/storage::storageExists: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {"/backup.info"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/storage::storageInfo: (this: {type: remote, path: /pgbackrest/repo1, write: false}, fileExp: {"/backup.info"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.724 P00 DEBUG: storage/remote/storage::storageRemoteInfo: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info"}, level: 3, param.followLink: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/remote/storage::storageRemoteInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | 
major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/storage::storageExists: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {"/backup.info.copy"}, param.timeout: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/storage::storageInfo: (this: {type: remote, path: /pgbackrest/repo1, write: false}, fileExp: {"/backup.info.copy"}, param.level: 3, param.ignoreMissing: true, param.followLink: true, param.noPathEnforce: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.725 P00 DEBUG: storage/remote/storage::storageRemoteInfo: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info.copy"}, level: 3, param.followLink: true) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/remote/storage::storageRemoteInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/storage::storageInfo: => {StorageInfo} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/storage::storageExists: => false logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/storage::storageList: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.errorOnMissing: false, param.nullOnMissing: false, param.expression: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/storage::storageNewItr: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.level: 1, param.errorOnMissing: false, param.recurse: false, param.nullOnMissing: false, param.sortOrder: 0, param.expression: null, param.recurse: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.726 P00 DEBUG: storage/iterator::storageItrNew: (driver: *void, path: {"/pgbackrest/repo1/archive/db"}, level: 1, errorOnMissing: false, nullOnMissing: false, recurse: false, sortOrder: 0, targetTime: 0, expression: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/iterator::storageItrNew: => {stack: {size: 0}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/storage::storageNewItr: => {stack: {size: 0}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/storage::storageList: => {[]} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/storage::storageList: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.errorOnMissing: false, param.nullOnMissing: false, param.expression: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/storage::storageNewItr: (this: {type: remote, path: /pgbackrest/repo1, write: false}, pathExp: {""}, param.level: 1, param.errorOnMissing: false, param.recurse: false, param.nullOnMissing: false, param.sortOrder: 0, param.expression: null, param.recurse: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.727 P00 DEBUG: storage/iterator::storageItrNew: (driver: *void, path: {"/pgbackrest/repo1/backup/db"}, level: 1, errorOnMissing: false, nullOnMissing: false, recurse: false, sortOrder: 0, targetTime: 0, expression: null) 
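Note: at this point in the trace, stanza-create has confirmed that neither archive.info nor backup.info exists on repo1 and is listing the (empty) archive and backup directories; the entries that follow complete those listings and then write fresh info files for this PostgreSQL 14 cluster (version: 140000, systemId: 7492091127174393878). As a rough sketch only (not captured from this run; the [backrest] checksum header is omitted and exact fields vary by pgBackRest version), the archive.info being written is a small INI-style file recording the stanza's PostgreSQL history:

    # Hypothetical inspection on the repo host; the path follows the trace
    # above, the file body is a sketch rather than output from this run.
    $ cat /pgbackrest/repo1/archive/db/archive.info
    [db]
    db-id=1
    db-system-id=7492091127174393878
    db-version="14"

    [db:history]
    1={"db-id":7492091127174393878,"db-version":"14"}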
logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: storage/iterator::storageItrNew: => {stack: {size: 0}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: storage/storage::storageNewItr: => {stack: {size: 0}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: storage/storage::storageList: => {[]} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchiveNew: (pgVersion: 140000, pgSystemId: 7492091127174393878) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgNew: (type: archive) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/info::infoNew: (void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/info::infoNew: => {Info} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgNew: => {InfoPg} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchivePgSet: (this: {InfoArchive}, pgVersion: 140000, pgSystemId: 7492091127174393878) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgSet: (this: {InfoPg}, type: archive, pgVersion: 140000, pgSystemId: 7492091127174393878, pgCatalogVersion: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgAdd: (this: {InfoPg}, infoPgData: *{id: 1, version: 140000, systemId: 7492091127174393878, catalogVersion: 0}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgAdd: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgSet: => {InfoPg} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchivePgSet: => {InfoArchive} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchiveNew: => {InfoArchive} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchiveSaveFile: (infoArchive: {InfoArchive}, storage: {type: remote, path: /pgbackrest/repo1, write: true}, fileName: {"/archive.info"}, cipherType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchiveSave: (this: {InfoArchive}, write: {IoWrite}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgSave: (this: {InfoPg}, write: {IoWrite}, callbackFunction: null, callbackData: null) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/info::infoSave: (this: {Info}, write: {IoWrite}, callbackFunction: (function *), callbackData: *void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgDataCurrent: (this: {InfoPg}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgDataCurrentId: (this: {InfoPg}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 
DEBUG: info/infoPg::infoPgDataCurrentId: => 0 logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgDataCurrent: => {id: 1, version: 140000, systemId: 7492091127174393878, catalogVersion: 0} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/info::infoSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoPg::infoPgSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: info/infoArchive::infoArchiveSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/archive.info"}, param.modeFile: 0000, param.modePath: 0000, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: false, param.noTruncate: false, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.728 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info"}, param.modeFile: 0640, param.modePath: 0750, param.user: null, param.group: null, param.timeModified: 0, param.createPath: true, param.syncFile: true, param.syncPath: true, param.atomic: true, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.729 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: => {type: remote, name: /pgbackrest/repo1/archive/db/archive.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.729 P00 DEBUG: storage/storage::storageNewWrite: => {type: remote, name: /pgbackrest/repo1/archive/db/archive.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.729 P00 DEBUG: storage/storage::storagePut: (file: {type: remote, name: /pgbackrest/repo1/archive/db/archive.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true}, buffer: {used: 255, size: 1048576}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/storage::storagePut: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/archive.info.copy"}, param.modeFile: 0000, param.modePath: 0000, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: false, param.noTruncate: false, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: (this: {StorageRemote}, file: {"/pgbackrest/repo1/archive/db/archive.info.copy"}, param.modeFile: 0640, param.modePath: 0750, param.user: null, param.group: null, param.timeModified: 0, param.createPath: true, param.syncFile: true, param.syncPath: true, param.atomic: true, param.compressible: false) logger.go:42: 16:26:03 
| major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: => {type: remote, name: /pgbackrest/repo1/archive/db/archive.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/storage::storageNewWrite: => {type: remote, name: /pgbackrest/repo1/archive/db/archive.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.738 P00 DEBUG: storage/storage::storagePut: (file: {type: remote, name: /pgbackrest/repo1/archive/db/archive.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true}, buffer: {used: 255, size: 1048576}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: storage/storage::storagePut: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoArchive::infoArchiveSaveFile: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupNew: (pgVersion: 140000, pgSystemId: 7492091127174393878, pgCatalogVersion: 202107181) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgNew: (type: backup) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/info::infoNew: (void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/info::infoNew: => {Info} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgNew: => {InfoPg} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupPgSet: (this: {InfoBackup}, pgVersion: 140000, pgSystemId: 7492091127174393878, pgCatalogVersion: 202107181) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgSet: (this: {InfoPg}, type: backup, pgVersion: 140000, pgSystemId: 7492091127174393878, pgCatalogVersion: 202107181) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgAdd: (this: {InfoPg}, infoPgData: *{id: 1, version: 140000, systemId: 7492091127174393878, catalogVersion: 202107181}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgAdd: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgSet: => {InfoPg} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupPgSet: => {InfoBackup} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupNew: => {InfoBackup} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupSaveFile: (infoBackup: {InfoBackup}, storage: {type: remote, path: /pgbackrest/repo1, write: true}, fileName: {"/backup.info"}, cipherType: none) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoBackup::infoBackupSave: 
(this: {InfoBackup}, write: {IoWrite}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgSave: (this: {InfoPg}, write: {IoWrite}, callbackFunction: (function *), callbackData: *void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/info::infoSave: (this: {Info}, write: {IoWrite}, callbackFunction: (function *), callbackData: *void) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgDataCurrent: (this: {InfoPg}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgDataCurrentId: (this: {InfoPg}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgDataCurrentId: => 0 logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.747 P00 DEBUG: info/infoPg::infoPgDataCurrent: => {id: 1, version: 140000, systemId: 7492091127174393878, catalogVersion: 202107181} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: info/info::infoSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: info/infoPg::infoPgSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: info/infoBackup::infoBackupSave: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: storage/storage::storageNewWrite: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/backup.info"}, param.modeFile: 0000, param.modePath: 0000, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: false, param.noTruncate: false, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info"}, param.modeFile: 0640, param.modePath: 0750, param.user: null, param.group: null, param.timeModified: 0, param.createPath: true, param.syncFile: true, param.syncPath: true, param.atomic: true, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: => {type: remote, name: /pgbackrest/repo1/backup/db/backup.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: storage/storage::storageNewWrite: => {type: remote, name: /pgbackrest/repo1/backup/db/backup.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.748 P00 DEBUG: storage/storage::storagePut: (file: {type: remote, name: /pgbackrest/repo1/backup/db/backup.info, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true}, buffer: {used: 372, size: 1048576}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: storage/storage::storagePut: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: 
storage/storage::storageNewWrite: (this: {type: remote, path: /pgbackrest/repo1, write: true}, fileExp: {"/backup.info.copy"}, param.modeFile: 0000, param.modePath: 0000, param.user: null, param.group: null, param.timeModified: 0, param.noCreatePath: false, param.noSyncFile: false, param.noSyncPath: false, param.noAtomic: false, param.noTruncate: false, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: (this: {StorageRemote}, file: {"/pgbackrest/repo1/backup/db/backup.info.copy"}, param.modeFile: 0640, param.modePath: 0750, param.user: null, param.group: null, param.timeModified: 0, param.createPath: true, param.syncFile: true, param.syncPath: true, param.atomic: true, param.compressible: false) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: storage/remote/storage::storageRemoteNewWrite: => {type: remote, name: /pgbackrest/repo1/backup/db/backup.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: storage/storage::storageNewWrite: => {type: remote, name: /pgbackrest/repo1/backup/db/backup.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.757 P00 DEBUG: storage/storage::storagePut: (file: {type: remote, name: /pgbackrest/repo1/backup/db/backup.info.copy, modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true}, buffer: {used: 372, size: 1048576}) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: storage/storage::storagePut: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: info/infoBackup::infoBackupSaveFile: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: command/stanza/create::cmdStanzaCreate: => void logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: command/exit::exitSafe: (result: 0, error: false, signalType: 0) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DETAIL: statistics: {"socket.client":{"total":1},"socket.session":{"total":1},"tls.client":{"total":1},"tls.session":{"total":1}} logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 INFO: stanza-create command end: completed successfully (78ms) logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: command/exit::exitSafe: => 0 logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | 2025-04-11 16:26:03.765 P00 DEBUG: main::main: => 0 logger.go:42: 16:26:03 | major-upgrade/23-recreate-stanza | test step completed 23-recreate-stanza logger.go:42: 16:26:03 | major-upgrade/27-run-backup | starting test step 27-run-backup logger.go:42: 16:26:04 | major-upgrade/27-run-backup | PerconaPGBackup:kuttl-test-mutual-maggot/backup-after-13-to-14 created logger.go:42: 16:26:31 | major-upgrade/27-run-backup | test step completed 27-run-backup logger.go:42: 16:26:31 | major-upgrade/28-run-restore | starting test step 28-run-restore logger.go:42: 16:26:31 | major-upgrade/28-run-restore | PerconaPGRestore:kuttl-test-mutual-maggot/restore-after-13-to-14 
created logger.go:42: 16:26:32 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:26:32 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:26:32 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 6m23s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 6m23s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 7m21s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 24s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 4/4 Running 0 5m20s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 4/4 Running 0 5m21s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 6m5s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 6m3s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 6m3s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 6m3s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 6m4s logger.go:42: 16:26:32 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 9m41s logger.go:42: 16:26:34 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:26:37 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:26:38 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:26:38 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 6m28s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 6m28s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | 
13-to-14-pgdata-p7h2d 0/1 Completed 0 7m26s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 29s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Terminating 0 6m10s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 6m8s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 6m8s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 6m8s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 6m9s logger.go:42: 16:26:38 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 9m46s logger.go:42: 16:26:39 | major-upgrade/28-run-restore | Error from server (NotFound): pods "major-upgrade-instance1-qt55-0" not found logger.go:42: 16:26:40 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:26:40 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:26:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 6m31s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 6m31s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 7m29s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 32s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-pgbackrest-restore-jgspf 0/1 Init:0/1 0 1s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 6m11s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 6m11s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 6m11s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 6m12s logger.go:42: 16:26:40 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 9m49s logger.go:42: 16:26:41 | major-upgrade/28-run-restore | No resources found in kuttl-test-mutual-maggot namespace. 
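Note: the step-28 check script is logged above with its newlines collapsed, which makes the commented-out restore-job loop run into the live code. Reconstructed with line breaks restored (same commands as logged; only the layout and indentation are added), the script each poll runs is:

    set -o errexit

    kubectl -n ${NAMESPACE} get pod

    # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
    # echo "${pod} logs:"
    # kubectl -n ${NAMESPACE} logs ${pod}
    # done
    #
    for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
        phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
        if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
        fi

        echo "PostgreSQL logs from ${pod}:"
        echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
    done

    sleep 15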
logger.go:42: 16:26:57 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:26:58 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:26:58 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 6m48s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 6m48s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 7m46s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 49s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 0/4 Init:0/5 0 2s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-pgbackrest-restore-jgspf 0/1 Completed 0 18s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 6m28s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 6m28s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 6m28s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 6m29s logger.go:42: 16:26:58 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 10m logger.go:42: 16:26:59 | major-upgrade/28-run-restore | Waiting for major-upgrade-instance1-qt55-0 to start running logger.go:42: 16:27:15 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:27:16 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:27:16 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 7m6s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 7m6s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 8m4s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | 
major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 67s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 0/4 PodInitializing 0 20s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-pgbackrest-restore-jgspf 0/1 Completed 0 36s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 6m46s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 6m46s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 6m46s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 6m47s logger.go:42: 16:27:16 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 10m logger.go:42: 16:27:17 | major-upgrade/28-run-restore | Waiting for major-upgrade-instance1-qt55-0 to start running logger.go:42: 16:27:33 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:27:34 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:27:34 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 7m24s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 7m24s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 8m22s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 14s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 2s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 85s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 0/4 Init:2/5 0 12s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 0/4 Init:2/5 0 12s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 38s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 7m4s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 7m4s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 7m4s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 7m5s logger.go:42: 16:27:34 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 10m logger.go:42: 16:27:35 | major-upgrade/28-run-restore | Waiting for major-upgrade-instance1-jjsw-0 to start running logger.go:42: 16:27:36 | major-upgrade/28-run-restore | Waiting 
for major-upgrade-instance1-l2dk-0 to start running logger.go:42: 16:27:36 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:27:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:27:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:27:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:27:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:27:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:27:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:27:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
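Note: the FileMissingError entries above are timestamped 16:24:34 and 16:25:34, i.e. before stanza-create rewrote archive.info at 16:26:03 in step 23, so they appear to be residue from the window between stanza removal and recreation that the tail -n 30 still covers, not a failure of the recreate itself. A quick confirmation that the stanza is valid again would be pgbackrest info (hypothetical command, not part of this test):

    # "pgbackrest" as the repo-host container name is an assumption.
    $ kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest info --stanza=db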
logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:27:38 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:27:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:27:54 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:27:55 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:27:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 7m46s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 7m46s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 8m44s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 36s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 24s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 3s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 107s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 34s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 34s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 60s 
logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 7m26s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 7m26s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 7m26s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 7m27s logger.go:42: 16:27:55 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 11m logger.go:42: 16:27:56 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:27:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.466 UTC [524] LOG: invalid record length at 0/130000A0: wanted 24, got 0 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.468 UTC [562] LOG: restarted WAL streaming at 0/13000000 on timeline 3 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.580 UTC [562] LOG: replication terminated by primary server logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.580 UTC [562] DETAIL: End of WAL reached on timeline 3 at 0/170000A0. logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.580 UTC [562] FATAL: could not send end-of-streaming message to primary: SSL connection has been closed unexpectedly logger.go:42: 16:27:58 | major-upgrade/28-run-restore | no COPY in progress logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.718 UTC [524] LOG: invalid record length at 0/170000A0: wanted 24, got 0 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.732 UTC [891] FATAL: could not connect to the primary server: connection to server at "major-upgrade-instance1-qt55-0.major-upgrade-pods" (10.249.177.13), port 5432 failed: FATAL: the database system is shutting down logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.735 UTC [521] LOG: received fast shutdown request logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.739 UTC [521] LOG: aborting any active transactions logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.739 UTC [549] FATAL: terminating connection due to administrator command logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.739 UTC [548] FATAL: terminating connection due to administrator command logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.743 UTC [531] LOG: shutting down logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.779 UTC [521] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.814 UTC [521] LOG: database system is shut down logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.885 UTC [35] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.885 UTC [35] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.885 UTC [35] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.912 UTC [35] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.935 UTC [38] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.957 UTC [38] LOG: restored log file "00000004.history" from archive logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.980 UTC [38] LOG: entering standby mode logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.002 UTC [38] LOG: restored log file "00000004.history" from archive logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.330 UTC [38] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.364 UTC [38] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.367 UTC [35] LOG: startup process (PID 38) exited with exit code 1 logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.367 UTC [35] LOG: aborting startup due to startup process failure logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.367 UTC [35] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:27:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.403 UTC [35] LOG: database system is shut down logger.go:42: 16:27:59 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.845 UTC [554] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:26:34.880 UTC [554] LOG: database system is shut down logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.754 UTC [35] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.755 UTC [35] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.755 UTC [35] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.781 UTC [35] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.798 UTC [38] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.821 UTC [38] LOG: restored log file "00000004.history" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.842 UTC [38] LOG: entering standby mode logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:38.866 UTC [38] LOG: restored log file "00000004.history" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.195 UTC [38] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.228 UTC [38] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.230 UTC [35] LOG: startup process (PID 38) exited with exit code 1 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.230 UTC [35] LOG: aborting startup due to startup process failure logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.230 UTC [35] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
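Note: this FATAL is the actual blocker on both replicas, and the retry logged next fails the same way: each standby's control file records a minimum recovery point of 0/170000A0 reached on timeline 3 (the "End of WAL reached on timeline 3 at 0/170000A0" above), while timeline 4, created by the restore, branches off at an earlier LSN, so PostgreSQL refuses to follow the new history. The earlier pg_waldump failure is unrelated noise: 00000004.history is a tiny text file, not a WAL segment, which is exactly why it reports "read 42 of 8192". Both points can be checked from a database pod with stock tools; a sketch, with hypothetical output values:

    # Data directory path /pgdata/pg14 follows --pg1-path in the trace above.
    $ pg_controldata /pgdata/pg14 | grep recovery
    Minimum recovery ending location:     0/170000A0
    Min recovery ending loc's timeline:   3

    # The timeline history is plain text (~42 bytes here, per the pg_waldump
    # message); the body below is illustrative, not taken from this run.
    $ cat /pgdata/pg14/pg_wal/00000004.history
    3       0/14000160      no recovery target specified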
logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:39.266 UTC [35] LOG: database system is shut down logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.795 UTC [131] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.795 UTC [131] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.795 UTC [131] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.821 UTC [131] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.831 UTC [134] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.854 UTC [134] LOG: restored log file "00000004.history" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.876 UTC [134] LOG: entering standby mode logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:27:59.898 UTC [134] LOG: restored log file "00000004.history" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.219 UTC [134] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.257 UTC [134] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.260 UTC [131] LOG: startup process (PID 134) exited with exit code 1 logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.260 UTC [131] LOG: aborting startup due to startup process failure logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.260 UTC [131] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:28:01 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.302 UTC [131] LOG: database system is shut down logger.go:42: 16:28:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:28:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:28:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:28:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:28:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:28:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:28:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:28:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:28:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:28:19 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit

kubectl -n ${NAMESPACE} get pod

# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done

sleep 15]
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 8m11s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 8m11s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 9m9s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 61s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 49s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 28s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 2m12s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 59s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 59s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 85s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 7m51s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 7m51s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 7m51s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 7m52s
logger.go:42: 16:28:20 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 11m
logger.go:42: 16:28:21 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.264 UTC [124] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.301 UTC [124] LOG: database system is shut down
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.798 UTC [159] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.798 UTC [159] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.798 UTC [159] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.825 UTC [159] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.835 UTC [162] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.858 UTC [162] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.879 UTC [162] LOG: entering standby mode
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.901 UTC [162] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.222 UTC [162] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.258 UTC [162] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.261 UTC [159] LOG: startup process (PID 162) exited with exit code 1
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.261 UTC [159] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.261 UTC [159] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.297 UTC [159] LOG: database system is shut down
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.814 UTC [194] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.815 UTC [194] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.815 UTC [194] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.841 UTC [194] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.851 UTC [197] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.875 UTC [197] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.897 UTC [197] LOG: entering standby mode
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.920 UTC [197] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.042 UTC [197] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.081 UTC [197] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: startup process (PID 197) exited with exit code 1
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.121 UTC [194] LOG: database system is shut down
logger.go:42: 16:28:24 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.260 UTC [131] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:00.302 UTC [131] LOG: database system is shut down
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.774 UTC [174] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.774 UTC [174] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.774 UTC [174] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.800 UTC [174] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.809 UTC [177] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.832 UTC [177] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.854 UTC [177] LOG: entering standby mode
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:09.875 UTC [177] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.195 UTC [177] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.230 UTC [177] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.233 UTC [174] LOG: startup process (PID 177) exited with exit code 1
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.233 UTC [174] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.233 UTC [174] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.289 UTC [174] LOG: database system is shut down
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.795 UTC [217] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.796 UTC [217] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.796 UTC [217] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.822 UTC [217] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.832 UTC [220] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.855 UTC [220] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.875 UTC [220] LOG: entering standby mode
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.897 UTC [220] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.017 UTC [220] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.053 UTC [220] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: startup process (PID 220) exited with exit code 1
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:25 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.096 UTC [217] LOG: database system is shut down
logger.go:42: 16:28:26 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
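The FATAL both replicas keep hitting says their control file records a minimum recovery point (0/170000A0) reached on timeline 3, while the restored 00000004.history shows timeline 4 branching off before that point, so the startup process exits and each automatic restart just replays the cycle. The mismatch can be read directly off the data directory; a sketch assuming PGDATA is /pgdata/pg14 as in the find command used by this step (pg_controldata and the plain-text .history format are stock PostgreSQL):

  # On a crash-looping replica: how far recovery got, and on which timeline
  pg_controldata /pgdata/pg14 | grep -i 'recovery ending'

  # Timeline history files are plain text; each line is "timeline  switchpoint  reason"
  cat /pgdata/pg14/pg_wal/00000004.history   # present only after restore_command fetched it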
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:28:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:28:44 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit

kubectl -n ${NAMESPACE} get pod

# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done

sleep 15]
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 8m36s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 8m36s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 9m34s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 1/1 Running 0 11s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 86s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 74s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 53s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 2m37s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 84s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 84s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 110s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 8m16s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 8m16s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 8m16s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 8m17s
logger.go:42: 16:28:45 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 11m
logger.go:42: 16:28:46 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.261 UTC [159] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.297 UTC [159] LOG: database system is shut down
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.814 UTC [194] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.815 UTC [194] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.815 UTC [194] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.841 UTC [194] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.851 UTC [197] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.875 UTC [197] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.897 UTC [197] LOG: entering standby mode
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.920 UTC [197] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.042 UTC [197] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.081 UTC [197] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: startup process (PID 197) exited with exit code 1
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.083 UTC [194] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.121 UTC [194] LOG: database system is shut down
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.787 UTC [256] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.787 UTC [256] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.787 UTC [256] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.814 UTC [256] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.824 UTC [259] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.846 UTC [259] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.868 UTC [259] LOG: entering standby mode
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.890 UTC [259] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.214 UTC [259] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.250 UTC [259] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.253 UTC [256] LOG: startup process (PID 259) exited with exit code 1
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.253 UTC [256] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.253 UTC [256] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.288 UTC [256] LOG: database system is shut down
logger.go:42: 16:28:49 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.233 UTC [174] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:10.289 UTC [174] LOG: database system is shut down
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.795 UTC [217] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.796 UTC [217] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.796 UTC [217] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.822 UTC [217] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.832 UTC [220] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.855 UTC [220] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.875 UTC [220] LOG: entering standby mode
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:19.897 UTC [220] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.017 UTC [220] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.053 UTC [220] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: startup process (PID 220) exited with exit code 1
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.055 UTC [217] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:20.096 UTC [217] LOG: database system is shut down
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.778 UTC [295] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.778 UTC [295] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.778 UTC [295] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.804 UTC [295] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.813 UTC [298] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.836 UTC [298] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.857 UTC [298] LOG: entering standby mode
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.879 UTC [298] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.200 UTC [298] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.238 UTC [298] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.241 UTC [295] LOG: startup process (PID 298) exited with exit code 1
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.241 UTC [295] LOG: aborting startup due to startup process failure
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.241 UTC [295] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.293 UTC [295] LOG: database system is shut down
logger.go:42: 16:28:51 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
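Alongside the replica crash loop, the pod listings show the major-upgrade-backup-4qds job burning through attempts: three pods in Error before a later attempt comes up and completes. Job pods carry the standard job-name label (the commented-out block in this step's own script uses the same selector for the restore job), so the failed attempts can be read without guessing pod names; a sketch using names and the namespace from this run:

  # Logs of one failed backup attempt
  kubectl -n kuttl-test-mutual-maggot logs major-upgrade-backup-4qds-t6tbn

  # Or all attempts of the job at once via the label selector
  kubectl -n kuttl-test-mutual-maggot logs -l job-name=major-upgrade-backup-4qds --tail=50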
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:28:53 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:29:09 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit

kubectl -n ${NAMESPACE} get pod

# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done

sleep 15]
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 9m1s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 9m1s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 9m59s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 36s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 111s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 99s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 78s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 3m2s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 109s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 109s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 2m15s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 8m41s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 8m41s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 8m41s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 8m42s
logger.go:42: 16:29:10 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 12m
logger.go:42: 16:29:11 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.253 UTC [256] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.288 UTC [256] LOG: database system is shut down
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.789 UTC [352] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.789 UTC [352] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.789 UTC [352] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.816 UTC [352] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.826 UTC [355] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.849 UTC [355] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.871 UTC [355] LOG: entering standby mode
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.894 UTC [355] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.016 UTC [355] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.056 UTC [355] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.059 UTC [352] LOG: startup process (PID 355) exited with exit code 1
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.059 UTC [352] LOG: aborting startup due to startup process failure
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.059 UTC [352] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.095 UTC [352] LOG: database system is shut down
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.778 UTC [387] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.779 UTC [387] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.779 UTC [387] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.805 UTC [387] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.815 UTC [390] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.839 UTC [390] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.860 UTC [390] LOG: entering standby mode
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.881 UTC [390] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.202 UTC [390] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.238 UTC [390] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: startup process (PID 390) exited with exit code 1
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: aborting startup due to startup process failure
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:13 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.276 UTC [387] LOG: database system is shut down
logger.go:42: 16:29:14 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.241 UTC [295] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:40.293 UTC [295] LOG: database system is shut down
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.785 UTC [388] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.785 UTC [388] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.785 UTC [388] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.816 UTC [388] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.825 UTC [391] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.849 UTC [391] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.871 UTC [391] LOG: entering standby mode
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:28:59.894 UTC [391] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.014 UTC [391] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.050 UTC [391] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.052 UTC [388] LOG: startup process (PID 391) exited with exit code 1
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.052 UTC [388] LOG: aborting startup due to startup process failure
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.052 UTC [388] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.093 UTC [388] LOG: database system is shut down
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.766 UTC [422] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.767 UTC [422] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.767 UTC [422] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.793 UTC [422] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.802 UTC [425] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.832 UTC [425] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.858 UTC [425] LOG: entering standby mode
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.880 UTC [425] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.201 UTC [425] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.251 UTC [425] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: startup process (PID 425) exited with exit code 1
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: aborting startup due to startup process failure
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.308 UTC [422] LOG: database system is shut down
logger.go:42: 16:29:16 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
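The recurring pg_waldump failures on the primary ("read 42 of 8192" for 00000004.history, "read 374 of 8192" for 000000040000000000000019.00000028.backup) are an artifact of what is being read rather than a sign of corruption: .history and .backup files are small plain-text files, while pg_waldump only parses fixed-size WAL segments, so it fails on the first page read. Reading them as text is sufficient; a sketch with the file names from this run:

  # History and .backup files are plain text - just read them
  cat 00000004.history
  cat 000000040000000000000019.00000028.backup

  # Keep pg_waldump for actual WAL segments (16MB by default), e.g.
  pg_waldump 000000040000000000000017 | head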
logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:29:18 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:29:18 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:29:18 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:29:34 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:29:35 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:29:35 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 9m26s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 9m26s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 10m logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 61s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 2m16s logger.go:42: 16:29:35 | 
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 2m4s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 103s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 3m27s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 2m14s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 2m14s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 2m40s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 9m6s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 9m6s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 9m6s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 9m7s logger.go:42: 16:29:35 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 12m logger.go:42: 16:29:36 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.059 UTC [352] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.095 UTC [352] LOG: database system is shut down logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.778 UTC [387] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.779 UTC [387] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.779 UTC [387] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.805 UTC [387] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.815 UTC [390] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.839 UTC [390] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.860 UTC [390] LOG: entering standby mode logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.881 UTC [390] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.202 UTC [390] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.238 UTC [390] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: startup process (PID 390) exited with exit code 1 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: aborting startup due to startup process failure logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.241 UTC [387] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown 
initiated. logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.276 UTC [387] LOG: database system is shut down logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.780 UTC [450] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.780 UTC [450] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.780 UTC [450] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.806 UTC [450] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.816 UTC [453] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.839 UTC [453] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.861 UTC [453] LOG: entering standby mode logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.882 UTC [453] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.203 UTC [453] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.239 UTC [453] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.241 UTC [450] LOG: startup process (PID 453) exited with exit code 1 logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.241 UTC [450] LOG: aborting startup due to startup process failure logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.241 UTC [450] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:29:38 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.277 UTC [450] LOG: database system is shut down logger.go:42: 16:29:39 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.052 UTC [388] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
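
Note on the step's diagnostic loop: the "running command" entries above flatten the test step's shell script onto a single line. Reconstructed with line breaks for readability (content unchanged from the log), the script that runs on each ~25-second poll is:

    set -o errexit

    kubectl -n ${NAMESPACE} get pod

    # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
    #   echo "${pod} logs:"
    #   kubectl -n ${NAMESPACE} logs ${pod}
    # done
    #
    for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
        phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
        if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
        fi
        echo "PostgreSQL logs from ${pod}:"
        echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
    done
    sleep 15

That is, it lists the pods, then tails the last 30 lines of every postgresql*.log on each Running database pod; pods whose phase is not Running are skipped, and the pgbackrest-restore log section is commented out in the step itself. This explains why the same log tails repeat below with only the poll timestamps advancing.
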
logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:00.093 UTC [388] LOG: database system is shut down logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.766 UTC [422] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.767 UTC [422] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.767 UTC [422] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.793 UTC [422] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.802 UTC [425] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.832 UTC [425] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.858 UTC [425] LOG: entering standby mode logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:09.880 UTC [425] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.201 UTC [425] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.251 UTC [425] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: startup process (PID 425) exited with exit code 1 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: aborting startup due to startup process failure logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.256 UTC [422] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
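
Note on the FATAL above, which is the root symptom of the replica crash loop: the standby's control file records that recovery already reached 0/170000A0 while on timeline 3, but the 00000004.history file restored from the archive places the branch to timeline 4 at an earlier LSN, so timeline 4's history cannot contain that minimum recovery point and the startup process exits. A quick way to confirm the mismatch is to compare the control file against the restored history file. A minimal sketch, assuming the PostgreSQL container in these pods is named database and pg_controldata is on the PATH (both assumptions, not shown in this log; the data directory /pgdata/pg14 is taken from the find command above):

    # minimum recovery ending location and its timeline, from the control file
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
        pg_controldata /pgdata/pg14 | grep -i 'recovery ending'

    # each history-file line is: parent-timeline <TAB> switch-LSN <TAB> reason
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
        cat /pgdata/pg14/pg_wal/00000004.history

If the switch LSN recorded for the timeline 3 to 4 branch is below 0/170000A0, the replica has replayed past the branch point and cannot follow timeline 4 without being rebuilt.
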
logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:10.308 UTC [422] LOG: database system is shut down logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.753 UTC [492] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.754 UTC [492] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.754 UTC [492] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.780 UTC [492] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.788 UTC [495] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.812 UTC [495] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.834 UTC [495] LOG: entering standby mode logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:29.856 UTC [495] LOG: restored log file "00000004.history" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.176 UTC [495] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.213 UTC [495] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.216 UTC [492] LOG: startup process (PID 495) exited with exit code 1 logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.216 UTC [492] LOG: aborting startup due to startup process failure logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.216 UTC [492] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:29:40 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.278 UTC [492] LOG: database system is shut down logger.go:42: 16:29:41 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:29:43 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:29:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:29:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:29:43 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:29:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:29:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:29:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
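
Note on the pgBackRest errors tailed from major-upgrade-instance1-qt55-0: they predate the restore (16:24 to 16:25) and have a separate cause. archive.info is written by stanza-create and read on every archive-push and backup, so "unable to load info file .../archive/db/archive.info" means the stanza db (the name comes from the repo path in the errors) either had not been created yet on repo1 or had not been re-initialized after the 13-to-14 pg_upgrade, which gives the cluster a new system identifier. The "restore point \"pgBackRest Archive Check\"" lines that follow are the marker the pgbackrest check command creates before switching WAL, and their success suggests archiving recovered once the stanza metadata was in place. For reference, a sketch of the stanza commands involved, assuming the repo-host container is named pgbackrest (an assumption; in this test the operator drives these steps itself):

    # create repo metadata for a new stanza (writes archive.info / backup.info)
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest --stanza=db stanza-create

    # after a major upgrade, record the new PG version and system id instead
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest --stanza=db stanza-upgrade

    # verify archiving end to end (forces a WAL switch and waits for the segment)
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest --stanza=db check
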
logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:29:43 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:29:43 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:29:43 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:29:59 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:30:00 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:30:00 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 9m51s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 9m51s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 10m logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 86s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 2m41s logger.go:42: 16:30:00 | 
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 2m29s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 2m8s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 3m52s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 2m39s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 2m39s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 3m5s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 9m31s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 9m31s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 9m31s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 9m32s logger.go:42: 16:30:00 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 13m logger.go:42: 16:30:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.241 UTC [450] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.277 UTC [450] LOG: database system is shut down logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.780 UTC [495] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.780 UTC [495] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.780 UTC [495] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.807 UTC [495] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.817 UTC [498] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.840 UTC [498] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.861 UTC [498] LOG: entering standby mode logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:39.883 UTC [498] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.205 UTC [498] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.244 UTC [498] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.246 UTC [495] LOG: startup process (PID 498) exited with exit code 1 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.246 UTC [495] LOG: aborting startup due to startup process failure logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.246 UTC [495] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown 
initiated. logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.283 UTC [495] LOG: database system is shut down logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.783 UTC [559] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.784 UTC [559] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.784 UTC [559] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.810 UTC [559] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.820 UTC [562] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.843 UTC [562] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.865 UTC [562] LOG: entering standby mode logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.888 UTC [562] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.209 UTC [562] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.245 UTC [562] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: startup process (PID 562) exited with exit code 1 logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: aborting startup due to startup process failure logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:03 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.298 UTC [559] LOG: database system is shut down logger.go:42: 16:30:03 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.216 UTC [492] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
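
Note on the pg_waldump fatals repeated in each qt55 tail above: they are benign in themselves. 00000004.history (42 bytes) and 000000040000000000000019.00000028.backup (374 bytes) are small text metadata files, not WAL segments, so pg_waldump fails on the first page read ("read 42 of 8192" and "read 374 of 8192" against the 8192-byte WAL page it expects). Whatever globbed the WAL directory here evidently picked them up alongside real segments. An actual segment decodes normally, e.g. (path assumed from the find command used by the poll script):

    # decode a real segment; .history/.backup metadata files always fail this way
    pg_waldump --path=/pgdata/pg14/pg_wal 000000040000000000000017 | head -n 5

pg_waldump can also be bounded by LSN with -s/-e instead of naming segments.
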
logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:30.278 UTC [492] LOG: database system is shut down logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.770 UTC [553] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.771 UTC [553] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.771 UTC [553] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.797 UTC [553] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.807 UTC [556] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.830 UTC [556] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.854 UTC [556] LOG: entering standby mode logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:49.877 UTC [556] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.199 UTC [556] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.235 UTC [556] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.238 UTC [553] LOG: startup process (PID 556) exited with exit code 1 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.238 UTC [553] LOG: aborting startup due to startup process failure logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.238 UTC [553] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:50.300 UTC [553] LOG: database system is shut down logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.756 UTC [589] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.757 UTC [589] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.757 UTC [589] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.783 UTC [589] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.792 UTC [592] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.816 UTC [592] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.837 UTC [592] LOG: entering standby mode logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.859 UTC [592] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.180 UTC [592] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.217 UTC [592] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.220 UTC [589] LOG: startup process (PID 592) exited with exit code 1 logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.220 UTC [589] LOG: aborting startup due to startup process failure logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.220 UTC [589] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:05 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.286 UTC [589] LOG: database system is shut down logger.go:42: 16:30:06 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:30:08 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:30:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:30:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:30:08 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:30:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:30:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:30:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:30:08 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:30:08 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:30:08 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:30:24 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:30:25 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:30:25 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 10m logger.go:42: 16:30:25 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 10m logger.go:42: 16:30:25 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 11m logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 110s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 3m5s logger.go:42: 16:30:25 | 
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 2m53s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 2m32s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 4m16s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 3m3s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 3m3s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 3m29s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 9m55s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 9m55s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 9m55s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 9m56s logger.go:42: 16:30:25 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 13m logger.go:42: 16:30:26 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.246 UTC [495] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:40.283 UTC [495] LOG: database system is shut down logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.783 UTC [559] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.784 UTC [559] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.784 UTC [559] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.810 UTC [559] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.820 UTC [562] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.843 UTC [562] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.865 UTC [562] LOG: entering standby mode logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:29:59.888 UTC [562] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.209 UTC [562] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.245 UTC [562] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: startup process (PID 562) exited with exit code 1 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: aborting startup due to startup process failure logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.247 UTC [559] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown 
initiated. logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.298 UTC [559] LOG: database system is shut down logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.792 UTC [622] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.792 UTC [622] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.792 UTC [622] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.819 UTC [622] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.830 UTC [625] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.853 UTC [625] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.874 UTC [625] LOG: entering standby mode logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.897 UTC [625] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.219 UTC [625] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.255 UTC [625] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.258 UTC [622] LOG: startup process (PID 625) exited with exit code 1 logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.258 UTC [622] LOG: aborting startup due to startup process failure logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.258 UTC [622] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:28 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.295 UTC [622] LOG: database system is shut down logger.go:42: 16:30:29 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.220 UTC [589] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:00.286 UTC [589] LOG: database system is shut down logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.759 UTC [652] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.759 UTC [652] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.759 UTC [652] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.785 UTC [652] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.793 UTC [655] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.816 UTC [655] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.837 UTC [655] LOG: entering standby mode logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:19.858 UTC [655] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.179 UTC [655] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.219 UTC [655] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.222 UTC [652] LOG: startup process (PID 655) exited with exit code 1 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.222 UTC [652] LOG: aborting startup due to startup process failure logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.222 UTC [652] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
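
Note on the pod listings interleaved above: the replicas only ever reach "entering standby mode" before dying, so their database containers presumably never pass readiness, which matches jjsw-0 and l2dk-0 staying at 3/4 while qt55-0 (apparently the primary on timeline 4, serving since 16:27:17) shows 4/4. A direct way to confirm which pod is actually serving, sketched under the same assumptions as above (container name database, local peer auth as postgres over the Unix socket the log shows at /tmp/postgres):

    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
        psql -h /tmp/postgres -U postgres -tAc 'select pg_is_in_recovery()'
    # "f" means primary; on jjsw-0/l2dk-0 this fails while postgres crash-loops

The backup pods in Error (major-upgrade-backup-4qds-*) can be inspected the same way with kubectl logs on the failed pods or on job/major-upgrade-backup-4qds.
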
logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.285 UTC [652] LOG: database system is shut down logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.760 UTC [697] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.760 UTC [697] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.761 UTC [697] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.787 UTC [697] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.796 UTC [700] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.819 UTC [700] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.842 UTC [700] LOG: entering standby mode logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.865 UTC [700] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.186 UTC [700] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.224 UTC [700] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: startup process (PID 700) exited with exit code 1 logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: aborting startup due to startup process failure logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:30 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.293 UTC [697] LOG: database system is shut down logger.go:42: 16:30:31 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:30:33 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:30:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:30:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:30:33 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:30:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:30:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:30:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:30:33 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:30:33 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:30:33 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:30:49 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:30:50 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:30:50 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 11m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 2m15s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 3m30s logger.go:42: 16:30:50 | 
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 3m18s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 2m57s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 4m41s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 3m28s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 3m28s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 3m54s logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 10m logger.go:42: 16:30:50 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 13m logger.go:42: 16:30:51 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.258 UTC [622] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.295 UTC [622] LOG: database system is shut down logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.803 UTC [666] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.804 UTC [666] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.804 UTC [666] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.832 UTC [666] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.842 UTC [669] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.865 UTC [669] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.887 UTC [669] LOG: entering standby mode logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.910 UTC [669] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.232 UTC [669] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.268 UTC [669] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.270 UTC [666] LOG: startup process (PID 669) exited with exit code 1 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.270 UTC [666] LOG: aborting startup due to startup process failure logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.270 UTC [666] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown 
initiated. logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.308 UTC [666] LOG: database system is shut down logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.849 UTC [718] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.861 UTC [721] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.887 UTC [721] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.911 UTC [721] LOG: entering standby mode logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.934 UTC [721] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.257 UTC [721] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.293 UTC [721] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: startup process (PID 721) exited with exit code 1 logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: aborting startup due to startup process failure logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:53 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.334 UTC [718] LOG: database system is shut down logger.go:42: 16:30:53 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.222 UTC [652] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:20.285 UTC [652] LOG: database system is shut down logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.760 UTC [697] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.760 UTC [697] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.761 UTC [697] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.787 UTC [697] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.796 UTC [700] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.819 UTC [700] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.842 UTC [700] LOG: entering standby mode logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:29.865 UTC [700] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.186 UTC [700] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.224 UTC [700] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: startup process (PID 700) exited with exit code 1 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: aborting startup due to startup process failure logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.226 UTC [697] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
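
Note on the overall pattern, now stable across four polls: every ten to twenty seconds the two replicas restart, restore 00000004.history and segment ...017 from the archive, hit the same timeline FATAL, and shut down; only the retry timestamps and PIDs advance. A standby in this state cannot recover by retrying, since its data directory has replayed past the timeline branch point; it has to be rebuilt from the current primary or from a backup. In a Patroni-managed cluster that is typically a reinit, sketched here with assumed names (the scope major-upgrade-ha and the database container name are not shown in this log, and the operator may perform the equivalent step on its own):

    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
        patronictl list
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
        patronictl reinit major-upgrade-ha major-upgrade-instance1-jjsw-0 --force
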
logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.293 UTC [697] LOG: database system is shut down logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.770 UTC [759] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.771 UTC [759] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.771 UTC [759] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.797 UTC [759] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.806 UTC [762] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.830 UTC [762] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.853 UTC [762] LOG: entering standby mode logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.876 UTC [762] LOG: restored log file "00000004.history" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.196 UTC [762] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.233 UTC [762] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.236 UTC [759] LOG: startup process (PID 762) exited with exit code 1 logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.236 UTC [759] LOG: aborting startup due to startup process failure logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.236 UTC [759] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:30:55 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.293 UTC [759] LOG: database system is shut down logger.go:42: 16:30:56 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
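These pgBackRest errors are from before the stanza for the upgraded cluster existed: without archive/db/archive.info the repository cannot push or serve WAL at all. The log's own HINTs point at the check to perform; a sketch of following them from the repo host is below. The stanza name db comes from the archive path in the errors above, while the container name pgbackrest on the repo-host pod is an assumption.

    # Inspect what pgBackRest thinks of the stanza and its archive:
    kubectl -n kuttl-test-mutual-maggot exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest info --stanza=db
    # If archive.info is genuinely missing (not just transiently unreadable),
    # stanza-create rebuilds the info files from the live cluster:
    kubectl -n kuttl-test-mutual-maggot exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest stanza-create --stanza=db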
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:30:58 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:31:14 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
# echo "${pod} logs:"
# kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
if [[ "${phase}" != "Running" ]]; then
echo "Waiting for ${pod} to start running"
continue
fi
echo "PostgreSQL logs from ${pod}:"
echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
| kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 11m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 11m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 12m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 2m40s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 3m55s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 3m43s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 3m22s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 5m6s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 3m53s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 3m53s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 4m19s
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 10m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 10m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 10m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 10m
logger.go:42: 16:31:15 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 14m
logger.go:42: 16:31:16 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.270 UTC [666] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:30.308 UTC [666] LOG: database system is shut down
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.819 UTC [718] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.849 UTC [718] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.861 UTC [721] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.887 UTC [721] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.911 UTC [721] LOG: entering standby mode
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:49.934 UTC [721] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.257 UTC [721] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.293 UTC [721] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: startup process (PID 721) exited with exit code 1
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.295 UTC [718] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.334 UTC [718] LOG: database system is shut down
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.802 UTC [762] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.803 UTC [762] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.803 UTC [762] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.829 UTC [762] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.840 UTC [765] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.865 UTC [765] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.887 UTC [765] LOG: entering standby mode
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.910 UTC [765] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.232 UTC [765] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.269 UTC [765] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.271 UTC [762] LOG: startup process (PID 765) exited with exit code 1
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.271 UTC [762] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.271 UTC [762] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.310 UTC [762] LOG: database system is shut down
logger.go:42: 16:31:18 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.236 UTC [759] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
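The two pg_waldump failures in the qt55-0 log above are expected noise rather than corruption: pg_waldump decodes WAL segment files page by page, while "00000004.history" (42 bytes) and "000000040000000000000019.00000028.backup" (374 bytes) are small text artifacts, so the very first 8 KB page read comes up short. For contrast, a hypothetical invocation against a real segment (segment name taken from the log above) would parse cleanly:

    # pg_waldump reads WAL segments, not the text-format .history/.backup files:
    pg_waldump /pgdata/pg14/pg_wal/000000040000000000000017 | head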
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:50.293 UTC [759] LOG: database system is shut down
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.762 UTC [802] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.763 UTC [802] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.763 UTC [802] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.789 UTC [802] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.798 UTC [805] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.821 UTC [805] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.843 UTC [805] LOG: entering standby mode
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:30:59.865 UTC [805] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.186 UTC [805] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.224 UTC [805] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.227 UTC [802] LOG: startup process (PID 805) exited with exit code 1
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.227 UTC [802] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.227 UTC [802] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.293 UTC [802] LOG: database system is shut down
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.758 UTC [857] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.759 UTC [857] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.759 UTC [857] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.784 UTC [857] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.794 UTC [860] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.817 UTC [860] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.840 UTC [860] LOG: entering standby mode
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.862 UTC [860] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.183 UTC [860] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.227 UTC [860] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: startup process (PID 860) exited with exit code 1
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:20 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.290 UTC [857] LOG: database system is shut down
logger.go:42: 16:31:21 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:31:23 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:31:39 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
# echo "${pod} logs:"
# kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
if [[ "${phase}" != "Running" ]]; then
echo "Waiting for ${pod} to start running"
continue
fi
echo "PostgreSQL logs from ${pod}:"
echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
| kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 12m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 3m5s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 4m20s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 4m8s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 3m47s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 5m31s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 4m18s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 4m18s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 4m44s
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 11m
logger.go:42: 16:31:40 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 14m
logger.go:42: 16:31:41 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.271 UTC [762] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.310 UTC [762] LOG: database system is shut down
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.818 UTC [831] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.819 UTC [831] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.819 UTC [831] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.845 UTC [831] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.856 UTC [834] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.879 UTC [834] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.902 UTC [834] LOG: entering standby mode
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.925 UTC [834] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.250 UTC [834] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.287 UTC [834] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.289 UTC [831] LOG: startup process (PID 834) exited with exit code 1
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.289 UTC [831] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.289 UTC [831] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.328 UTC [831] LOG: database system is shut down
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.882 UTC [867] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.893 UTC [870] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.918 UTC [870] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.941 UTC [870] LOG: entering standby mode
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.963 UTC [870] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.285 UTC [870] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.324 UTC [870] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: startup process (PID 870) exited with exit code 1
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:43 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.367 UTC [867] LOG: database system is shut down
logger.go:42: 16:31:44 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.227 UTC [802] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
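Note the pattern in these dumps: every ten to twenty seconds a fresh postgres is launched (postmaster PIDs climbing: 718, 762, 831, 867, ...), each replays the same history file, hits the same FATAL, and exits. That is also why jjsw-0 and l2dk-0 sit at 3/4 Ready with 0 restarts in the pod listings: the database container itself keeps running while only the postgres process inside it cycles. A rough way to see the loop at a glance is sketched below; the container name database is an assumption and the log path comes from the find command in the test step above.

    # Count how many times each replica has hit the timeline FATAL so far;
    # grep -c prints one count per matching log file.
    for pod in major-upgrade-instance1-jjsw-0 major-upgrade-instance1-l2dk-0; do
      echo "${pod}:"
      kubectl -n kuttl-test-mutual-maggot exec "${pod}" -c database -- \
          bash -c "grep -c 'requested timeline 4' /pgdata/pg14/log/postgresql*.log"
    done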
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:00.293 UTC [802] LOG: database system is shut down
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.758 UTC [857] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.759 UTC [857] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.759 UTC [857] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.784 UTC [857] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.794 UTC [860] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.817 UTC [860] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.840 UTC [860] LOG: entering standby mode
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:19.862 UTC [860] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.183 UTC [860] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.227 UTC [860] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: startup process (PID 860) exited with exit code 1
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.230 UTC [857] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.290 UTC [857] LOG: database system is shut down
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.769 UTC [902] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.770 UTC [902] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.770 UTC [902] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.795 UTC [902] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.804 UTC [905] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.827 UTC [905] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.849 UTC [905] LOG: entering standby mode
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.871 UTC [905] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.192 UTC [905] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.232 UTC [905] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.235 UTC [902] LOG: startup process (PID 905) exited with exit code 1
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.235 UTC [902] LOG: aborting startup due to startup process failure
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.235 UTC [902] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:31:45 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.287 UTC [902] LOG: database system is shut down
logger.go:42: 16:31:46 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:31:48 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:32:04 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
# echo "${pod} logs:"
# kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
if [[ "${phase}" != "Running" ]]; then
echo "Waiting for ${pod} to start running"
continue
fi
echo "PostgreSQL logs from ${pod}:"
echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
| kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 12m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 3m31s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 4m46s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 4m34s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 4m13s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 5m57s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 4m44s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 4m44s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 5m10s
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 11m
logger.go:42: 16:32:05 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 15m
logger.go:42: 16:32:06 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.289 UTC [831] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:20.328 UTC [831] LOG: database system is shut down
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.855 UTC [867] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.882 UTC [867] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.893 UTC [870] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.918 UTC [870] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.941 UTC [870] LOG: entering standby mode
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:29.963 UTC [870] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.285 UTC [870] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.324 UTC [870] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: startup process (PID 870) exited with exit code 1
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.326 UTC [867] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.367 UTC [867] LOG: database system is shut down
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.844 UTC [927] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.845 UTC [927] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.845 UTC [927] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.873 UTC [927] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.884 UTC [930] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.908 UTC [930] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.933 UTC [930] LOG: entering standby mode
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.957 UTC [930] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.279 UTC [930] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.316 UTC [930] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.318 UTC [927] LOG: startup process (PID 930) exited with exit code 1
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.318 UTC [927] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.318 UTC [927] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:08 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.356 UTC [927] LOG: database system is shut down
logger.go:42: 16:32:09 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.273 UTC [973] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
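Since every replica fails at exactly the same switch point, the natural next check is what the repository actually holds for timelines 3 and 4. pgBackRest can list the archive directly; a sketch follows. The archive/db path comes from the errors above, repo-ls is assumed to be available in the pgBackRest version these images ship, and the 14-1 archive id is an assumption about the stanza's history.

    # List archive ids for the stanza, then the WAL/history files for one id:
    kubectl -n kuttl-test-mutual-maggot exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest repo-ls --stanza=db archive/db
    kubectl -n kuttl-test-mutual-maggot exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest repo-ls --stanza=db archive/db/14-1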
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:30.287 UTC [902] LOG: database system is shut down
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.807 UTC [973] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.808 UTC [973] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.808 UTC [973] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.834 UTC [973] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.842 UTC [976] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.866 UTC [976] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.887 UTC [976] LOG: entering standby mode
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:49.910 UTC [976] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.233 UTC [976] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.270 UTC [976] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.273 UTC [973] LOG: startup process (PID 976) exited with exit code 1
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.273 UTC [973] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.273 UTC [973] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.319 UTC [973] LOG: database system is shut down
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.761 UTC [1026] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.762 UTC [1026] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.762 UTC [1026] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.788 UTC [1026] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.797 UTC [1029] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.820 UTC [1029] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.842 UTC [1029] LOG: entering standby mode
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.864 UTC [1029] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.185 UTC [1029] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.222 UTC [1029] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: startup process (PID 1029) exited with exit code 1
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:10 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.299 UTC [1026] LOG: database system is shut down
logger.go:42: 16:32:11 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:32:13 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:32:29 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
    phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
    if [[ "${phase}" != "Running" ]]; then
        echo "Waiting for ${pod} to start running"
        continue
    fi
    echo "PostgreSQL logs from ${pod}:"
    echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
        | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 13m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 3m56s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 5m11s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 4m59s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 4m38s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 6m22s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 5m9s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 5m9s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 5m35s
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 12m
logger.go:42: 16:32:30 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 15m
logger.go:42: 16:32:31 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.318 UTC [927] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.356 UTC [927] LOG: database system is shut down
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.803 UTC [989] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.804 UTC [989] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.804 UTC [989] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.831 UTC [989] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.842 UTC [992] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.865 UTC [992] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.888 UTC [992] LOG: entering standby mode
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.913 UTC [992] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.237 UTC [992] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.275 UTC [992] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.277 UTC [989] LOG: startup process (PID 992) exited with exit code 1
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.277 UTC [989] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.277 UTC [989] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.315 UTC [989] LOG: database system is shut down
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.807 UTC [1043] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.808 UTC [1043] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.808 UTC [1043] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.834 UTC [1043] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.844 UTC [1046] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.868 UTC [1046] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.890 UTC [1046] LOG: entering standby mode
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.914 UTC [1046] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.237 UTC [1046] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.273 UTC [1046] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: startup process (PID 1046) exited with exit code 1
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:33 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.314 UTC [1043] LOG: database system is shut down
logger.go:42: 16:32:34 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.273 UTC [973] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:31:50.319 UTC [973] LOG: database system is shut down
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.761 UTC [1026] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.762 UTC [1026] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.762 UTC [1026] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.788 UTC [1026] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.797 UTC [1029] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.820 UTC [1029] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.842 UTC [1029] LOG: entering standby mode
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:09.864 UTC [1029] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.185 UTC [1029] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.222 UTC [1029] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: startup process (PID 1029) exited with exit code 1
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.299 UTC [1026] LOG: database system is shut down
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.785 UTC [1088] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.794 UTC [1091] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.818 UTC [1091] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.840 UTC [1091] LOG: entering standby mode
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.863 UTC [1091] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.183 UTC [1091] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.221 UTC [1091] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: startup process (PID 1091) exited with exit code 1
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: aborting startup due to startup process failure
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:32:35 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.291 UTC [1088] LOG: database system is shut down
logger.go:42: 16:32:36 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
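Note: the crash loop on both replica pods above is the same failure every time: recovery restores 00000004.history, but the control file says the minimum recovery point is 0/170000A0 on timeline 3, and timeline 4 branched off timeline 3 before that LSN, so PostgreSQL refuses the requested timeline, the startup process exits, and the container supervisor retries (hence a fresh postmaster PID roughly every twenty seconds). One way to see the branch point is to read the restored history file, e.g. (a sketch; the database container name is an assumption, /pgdata/pg14 is the data path used by the log-collection script in this test):

    # Sketch: each line of a timeline history file is "parentTLI  branchLSN  reason"
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
        cat /pgdata/pg14/pg_wal/00000004.history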
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:32:38 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:32:38 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:32:38 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:32:54 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit kubectl -n ${NAMESPACE} get pod # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do # echo "${pod} logs:" # kubectl -n ${NAMESPACE} logs ${pod} # done # for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"}) if [[ "${phase}" != "Running" ]]; then echo "Waiting for ${pod} to start running" continue fi echo "PostgreSQL logs from ${pod}:" echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \ | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null done sleep 15] logger.go:42: 16:32:55 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:32:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 13m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 4m20s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 5m35s logger.go:42: 16:32:55 | 
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 5m23s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 5m2s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 6m46s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 5m33s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 5m33s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 5m59s logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 12m logger.go:42: 16:32:55 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 16m logger.go:42: 16:32:56 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.277 UTC [989] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.315 UTC [989] LOG: database system is shut down logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.807 UTC [1043] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.808 UTC [1043] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.808 UTC [1043] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.834 UTC [1043] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.844 UTC [1046] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.868 UTC [1046] LOG: restored log file "00000004.history" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.890 UTC [1046] LOG: entering standby mode logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.914 UTC [1046] LOG: restored log file "00000004.history" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.237 UTC [1046] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.273 UTC [1046] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: startup process (PID 1046) exited with exit code 1 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: aborting startup due to startup process failure logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.275 UTC [1043] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: 
Shutdown initiated. logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.314 UTC [1043] LOG: database system is shut down logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.803 UTC [1088] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.803 UTC [1088] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.804 UTC [1088] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.831 UTC [1088] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.841 UTC [1091] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.865 UTC [1091] LOG: restored log file "00000004.history" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.889 UTC [1091] LOG: entering standby mode logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:39.913 UTC [1091] LOG: restored log file "00000004.history" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.236 UTC [1091] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.273 UTC [1091] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.275 UTC [1088] LOG: startup process (PID 1091) exited with exit code 1 logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.275 UTC [1088] LOG: aborting startup due to startup process failure logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.275 UTC [1088] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:32:58 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.314 UTC [1088] LOG: database system is shut down logger.go:42: 16:32:58 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.225 UTC [1026] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:10.299 UTC [1026] LOG: database system is shut down
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.759 UTC [1088] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.785 UTC [1088] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.794 UTC [1091] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.818 UTC [1091] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.840 UTC [1091] LOG: entering standby mode
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:29.863 UTC [1091] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.183 UTC [1091] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.221 UTC [1091] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: startup process (PID 1091) exited with exit code 1
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.224 UTC [1088] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:30.291 UTC [1088] LOG: database system is shut down
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.757 UTC [1149] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.758 UTC [1149] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.758 UTC [1149] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.784 UTC [1149] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.794 UTC [1152] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.818 UTC [1152] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.840 UTC [1152] LOG: entering standby mode
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:49.862 UTC [1152] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.182 UTC [1152] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.221 UTC [1152] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.223 UTC [1149] LOG: startup process (PID 1152) exited with exit code 1
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.223 UTC [1149] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.223 UTC [1149] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:00 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.288 UTC [1149] LOG: database system is shut down
logger.go:42: 16:33:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:33:03 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:33:19 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
    phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
    if [[ "${phase}" != "Running" ]]; then
        echo "Waiting for ${pod} to start running"
        continue
    fi
    echo "PostgreSQL logs from ${pod}:"
    echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
        | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 13m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 13m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 14m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 4m45s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 6m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 5m48s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 5m27s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 7m11s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 5m58s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 5m58s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 6m24s
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 12m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 12m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 12m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 12m
logger.go:42: 16:33:20 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 16m
logger.go:42: 16:33:21 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.275 UTC [1088] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:40.314 UTC [1088] LOG: database system is shut down
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.842 UTC [1160] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.843 UTC [1160] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.843 UTC [1160] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.871 UTC [1160] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.881 UTC [1163] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.905 UTC [1163] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.927 UTC [1163] LOG: entering standby mode
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:32:59.950 UTC [1163] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.272 UTC [1163] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.309 UTC [1163] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.311 UTC [1160] LOG: startup process (PID 1163) exited with exit code 1
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.312 UTC [1160] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.312 UTC [1160] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.351 UTC [1160] LOG: database system is shut down
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.810 UTC [1214] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.811 UTC [1214] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.811 UTC [1214] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.837 UTC [1214] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.848 UTC [1217] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.873 UTC [1217] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.895 UTC [1217] LOG: entering standby mode
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.918 UTC [1217] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.240 UTC [1217] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.276 UTC [1217] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: startup process (PID 1217) exited with exit code 1
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.316 UTC [1214] LOG: database system is shut down
logger.go:42: 16:33:23 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.223 UTC [1149] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:32:50.288 UTC [1149] LOG: database system is shut down
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.763 UTC [1220] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.763 UTC [1220] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.763 UTC [1220] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.789 UTC [1220] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.798 UTC [1223] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.822 UTC [1223] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.843 UTC [1223] LOG: entering standby mode
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:09.865 UTC [1223] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.185 UTC [1223] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.224 UTC [1223] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.227 UTC [1220] LOG: startup process (PID 1223) exited with exit code 1
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.227 UTC [1220] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.227 UTC [1220] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.285 UTC [1220] LOG: database system is shut down
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.773 UTC [1255] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.774 UTC [1255] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.774 UTC [1255] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.800 UTC [1255] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.809 UTC [1258] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.832 UTC [1258] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.854 UTC [1258] LOG: entering standby mode
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.876 UTC [1258] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.197 UTC [1258] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.236 UTC [1258] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: startup process (PID 1258) exited with exit code 1
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:25 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.288 UTC [1255] LOG: database system is shut down
logger.go:42: 16:33:26 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:33:28 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:33:44 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
    phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
    if [[ "${phase}" != "Running" ]]; then
        echo "Waiting for ${pod} to start running"
        continue
    fi
    echo "PostgreSQL logs from ${pod}:"
    echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
        | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 14m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 5m10s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 6m25s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 6m13s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 5m52s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 7m36s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 6m23s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 6m23s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 6m49s
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 13m
logger.go:42: 16:33:45 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 16m
logger.go:42: 16:33:46 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.312 UTC [1160] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:00.351 UTC [1160] LOG: database system is shut down
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.810 UTC [1214] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.811 UTC [1214] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.811 UTC [1214] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.837 UTC [1214] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.848 UTC [1217] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.873 UTC [1217] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.895 UTC [1217] LOG: entering standby mode
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.918 UTC [1217] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.240 UTC [1217] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.276 UTC [1217] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: startup process (PID 1217) exited with exit code 1
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.316 UTC [1214] LOG: database system is shut down
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.800 UTC [1275] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.801 UTC [1275] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.801 UTC [1275] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.828 UTC [1275] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.838 UTC [1278] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.862 UTC [1278] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.884 UTC [1278] LOG: entering standby mode
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.907 UTC [1278] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.229 UTC [1278] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.265 UTC [1278] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: startup process (PID 1278) exited with exit code 1
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: aborting startup due to startup process failure
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.305 UTC [1275] LOG: database system is shut down
logger.go:42: 16:33:48 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.227 UTC [1220] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:10.285 UTC [1220] LOG: database system is shut down logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.773 UTC [1255] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.774 UTC [1255] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.774 UTC [1255] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.800 UTC [1255] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.809 UTC [1258] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.832 UTC [1258] LOG: restored log file "00000004.history" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.854 UTC [1258] LOG: entering standby mode logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:19.876 UTC [1258] LOG: restored log file "00000004.history" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.197 UTC [1258] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.236 UTC [1258] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: startup process (PID 1258) exited with exit code 1 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: aborting startup due to startup process failure logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.288 UTC [1255] LOG: database system is shut down logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.759 UTC [1319] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.760 UTC [1319] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.760 UTC [1319] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.786 UTC [1319] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.795 UTC [1322] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.818 UTC [1322] LOG: restored log file "00000004.history" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.840 UTC [1322] LOG: entering standby mode logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.862 UTC [1322] LOG: restored log file "00000004.history" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.183 UTC [1322] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.220 UTC [1322] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: startup process (PID 1322) exited with exit code 1 logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: aborting startup due to startup process failure logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:33:51 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.288 UTC [1319] LOG: database system is shut down logger.go:42: 16:33:52 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:33:54 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:33:54 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:33:54 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:33:54 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:33:54 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:33:54 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:33:54 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:33:54 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:33:54 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:33:54 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:34:10 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15] logger.go:42: 16:34:10 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:34:10 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 14m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 14m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 14m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 5m36s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 6m51s logger.go:42: 16:34:10 |
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 6m39s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 6m18s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 8m2s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 6m49s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 6m49s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 7m15s logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 13m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 13m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 13m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 13m logger.go:42: 16:34:10 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 17m logger.go:42: 16:34:12 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.279 UTC [1214] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.316 UTC [1214] LOG: database system is shut down logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.800 UTC [1275] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.801 UTC [1275] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.801 UTC [1275] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.828 UTC [1275] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.838 UTC [1278] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.862 UTC [1278] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.884 UTC [1278] LOG: entering standby mode logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.907 UTC [1278] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.229 UTC [1278] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.265 UTC [1278] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: startup process (PID 1278) exited with exit code 1 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: aborting startup due to startup process failure logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: 
Shutdown initiated. logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.305 UTC [1275] LOG: database system is shut down logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.848 UTC [1346] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.859 UTC [1349] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.882 UTC [1349] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.905 UTC [1349] LOG: entering standby mode logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.928 UTC [1349] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.050 UTC [1349] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.089 UTC [1349] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.091 UTC [1346] LOG: startup process (PID 1349) exited with exit code 1 logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.092 UTC [1346] LOG: aborting startup due to startup process failure logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.092 UTC [1346] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:13 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.133 UTC [1346] LOG: database system is shut down logger.go:42: 16:34:14 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.239 UTC [1255] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:20.288 UTC [1255] LOG: database system is shut down logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.759 UTC [1319] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.760 UTC [1319] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.760 UTC [1319] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.786 UTC [1319] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.795 UTC [1322] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.818 UTC [1322] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.840 UTC [1322] LOG: entering standby mode logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:39.862 UTC [1322] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.183 UTC [1322] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.220 UTC [1322] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: startup process (PID 1322) exited with exit code 1 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: aborting startup due to startup process failure logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.223 UTC [1319] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.288 UTC [1319] LOG: database system is shut down logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.773 UTC [1380] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.773 UTC [1380] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.773 UTC [1380] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.799 UTC [1380] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.808 UTC [1383] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.832 UTC [1383] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.853 UTC [1383] LOG: entering standby mode logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.874 UTC [1383] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.994 UTC [1383] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.032 UTC [1383] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.034 UTC [1380] LOG: startup process (PID 1383) exited with exit code 1 logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.034 UTC [1380] LOG: aborting startup due to startup process failure logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.034 UTC [1380] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:16 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.075 UTC [1380] LOG: database system is shut down logger.go:42: 16:34:16 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:34:18 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:34:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:34:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:34:18 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:34:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:34:18 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:34:18 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:34:18 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:34:18 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:34:18 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:34:35 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15] logger.go:42: 16:34:35 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:34:35 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 15m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 6m1s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 7m16s logger.go:42: 16:34:35 |
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 7m4s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 6m43s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 8m27s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 7m14s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 7m14s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 7m40s logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 14m logger.go:42: 16:34:35 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 17m logger.go:42: 16:34:37 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.267 UTC [1275] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:40.305 UTC [1275] LOG: database system is shut down logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.821 UTC [1346] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.848 UTC [1346] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.859 UTC [1349] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.882 UTC [1349] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.905 UTC [1349] LOG: entering standby mode logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:33:59.928 UTC [1349] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.050 UTC [1349] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.089 UTC [1349] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.091 UTC [1346] LOG: startup process (PID 1349) exited with exit code 1 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.092 UTC [1346] LOG: aborting startup due to startup process failure logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.092 UTC [1346] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: 
Shutdown initiated. logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.133 UTC [1346] LOG: database system is shut down logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.819 UTC [1407] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.819 UTC [1407] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.819 UTC [1407] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.846 UTC [1407] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.855 UTC [1410] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.878 UTC [1410] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.900 UTC [1410] LOG: entering standby mode logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.924 UTC [1410] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.247 UTC [1410] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.284 UTC [1410] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.286 UTC [1407] LOG: startup process (PID 1410) exited with exit code 1 logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.286 UTC [1407] LOG: aborting startup due to startup process failure logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.286 UTC [1407] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:38 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.323 UTC [1407] LOG: database system is shut down logger.go:42: 16:34:39 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.034 UTC [1380] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:00.075 UTC [1380] LOG: database system is shut down logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.769 UTC [1451] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.769 UTC [1451] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.770 UTC [1451] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.796 UTC [1451] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.805 UTC [1454] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.827 UTC [1454] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.848 UTC [1454] LOG: entering standby mode logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:19.870 UTC [1454] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.190 UTC [1454] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.233 UTC [1454] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.235 UTC [1451] LOG: startup process (PID 1454) exited with exit code 1 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.235 UTC [1451] LOG: aborting startup due to startup process failure logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.235 UTC [1451] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.291 UTC [1451] LOG: database system is shut down logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.774 UTC [1505] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.775 UTC [1505] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.775 UTC [1505] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.801 UTC [1505] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.811 UTC [1508] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.836 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.857 UTC [1508] LOG: entering standby mode logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.880 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.202 UTC [1508] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.245 UTC [1508] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: startup process (PID 1508) exited with exit code 1 logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: aborting startup due to startup process failure logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:34:41 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.296 UTC [1505] LOG: database system is shut down logger.go:42: 16:34:41 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:34:43 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:34:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:34:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:34:43 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:34:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:34:43 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:34:43 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140 logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections logger.go:42: 16:34:43 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192 logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140 logger.go:42: 16:34:43 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text logger.go:42: 16:34:43 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192 logger.go:42: 16:35:00 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15] logger.go:42: 16:35:00 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE logger.go:42: 16:35:00 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 15m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 6m26s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 7m41s logger.go:42: 16:35:00 |
major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 7m29s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 7m8s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 8m52s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 7m39s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 7m39s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 8m5s logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 14m logger.go:42: 16:35:00 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 18m logger.go:42: 16:35:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0: logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.286 UTC [1407] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.323 UTC [1407] LOG: database system is shut down logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.795 UTC [1469] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.795 UTC [1469] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.795 UTC [1469] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.822 UTC [1469] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.833 UTC [1472] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.857 UTC [1472] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.879 UTC [1472] LOG: entering standby mode logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.902 UTC [1472] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.225 UTC [1472] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.261 UTC [1472] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.264 UTC [1469] LOG: startup process (PID 1472) exited with exit code 1 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.264 UTC [1469] LOG: aborting startup due to startup process failure logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.264 UTC [1469] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: 
Shutdown initiated. logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.304 UTC [1469] LOG: database system is shut down logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.828 UTC [1505] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.838 UTC [1508] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.863 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.886 UTC [1508] LOG: entering standby mode logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.910 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.233 UTC [1508] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.269 UTC [1508] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: startup process (PID 1508) exited with exit code 1 logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: aborting startup due to startup process failure logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:35:03 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.311 UTC [1505] LOG: database system is shut down logger.go:42: 16:35:04 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0: logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.235 UTC [1451] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:20.291 UTC [1451] LOG: database system is shut down logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.774 UTC [1505] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.775 UTC [1505] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.775 UTC [1505] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.801 UTC [1505] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.811 UTC [1508] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.836 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.857 UTC [1508] LOG: entering standby mode logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:39.880 UTC [1508] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.202 UTC [1508] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.245 UTC [1508] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: startup process (PID 1508) exited with exit code 1 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: aborting startup due to startup process failure logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.249 UTC [1505] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. 
logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.296 UTC [1505] LOG: database system is shut down logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.771 UTC [1548] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.772 UTC [1548] LOG: listening on IPv4 address "0.0.0.0", port 5432 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.772 UTC [1548] LOG: listening on IPv6 address "::", port 5432 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.798 UTC [1548] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432" logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.807 UTC [1551] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.831 UTC [1551] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.854 UTC [1551] LOG: entering standby mode logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.877 UTC [1551] LOG: restored log file "00000004.history" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.199 UTC [1551] LOG: restored log file "000000040000000000000017" from archive logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.238 UTC [1551] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.241 UTC [1548] LOG: startup process (PID 1551) exited with exit code 1 logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.241 UTC [1548] LOG: aborting startup due to startup process failure logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.241 UTC [1548] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated. logger.go:42: 16:35:06 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.293 UTC [1548] LOG: database system is shut down logger.go:42: 16:35:06 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0: logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:35:08 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:35:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:35:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository: logger.go:42: 16:35:08 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy': logger.go:42: 16:35:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read logger.go:42: 16:35:08 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments. logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf? logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed? logger.go:42: 16:35:08 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme. 
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:08 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 15m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 15m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 16m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 6m51s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 8m6s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 7m54s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 7m33s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 9m17s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 8m4s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 8m4s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 8m30s
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 14m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 14m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 14m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 14m
logger.go:42: 16:35:25 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 18m
logger.go:42: 16:35:27 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.264 UTC [1469] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:40.304 UTC [1469] LOG: database system is shut down
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.801 UTC [1505] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.828 UTC [1505] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.838 UTC [1508] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.863 UTC [1508] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.886 UTC [1508] LOG: entering standby mode
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:49.910 UTC [1508] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.233 UTC [1508] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.269 UTC [1508] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: startup process (PID 1508) exited with exit code 1
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.271 UTC [1505] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.311 UTC [1505] LOG: database system is shut down
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.817 UTC [1578] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.818 UTC [1578] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.818 UTC [1578] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.845 UTC [1578] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.856 UTC [1581] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.881 UTC [1581] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.905 UTC [1581] LOG: entering standby mode
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.930 UTC [1581] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.277 UTC [1581] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.315 UTC [1581] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.317 UTC [1578] LOG: startup process (PID 1581) exited with exit code 1
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.317 UTC [1578] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.317 UTC [1578] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:28 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.356 UTC [1578] LOG: database system is shut down
logger.go:42: 16:35:29 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.241 UTC [1548] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
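Every restart attempt in the tails above dies at the same point: the replica's control file says recovery must reach 0/170000A0 on timeline 3, but the archived 00000004.history shows timeline 4 branching off before that location, so PostgreSQL refuses to follow it. A hedged sketch for confirming this from inside an instance pod follows; the container name database is an assumption, while the data directory /pgdata/pg14, the stanza db, and the history file name are taken from the log itself.

    # Compare the control file's minimum recovery point with the archived
    # timeline history (container name "database" is an assumption).
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
        pg_controldata /pgdata/pg14 | grep -E 'recovery ending|TimeLineID'
    # Pull the timeline 4 history file out of the archive and read it:
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
        bash -c 'pgbackrest archive-get 00000004.history /tmp/00000004.history --stanza=db && cat /tmp/00000004.history'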
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:34:50.293 UTC [1548] LOG: database system is shut down
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.774 UTC [1610] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.775 UTC [1610] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.775 UTC [1610] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.802 UTC [1610] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.811 UTC [1613] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.837 UTC [1613] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.860 UTC [1613] LOG: entering standby mode
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:09.884 UTC [1613] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.207 UTC [1613] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.245 UTC [1613] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.248 UTC [1610] LOG: startup process (PID 1613) exited with exit code 1
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.248 UTC [1610] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.248 UTC [1610] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.298 UTC [1610] LOG: database system is shut down
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.773 UTC [1673] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.774 UTC [1673] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.774 UTC [1673] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.801 UTC [1673] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.810 UTC [1676] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.836 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.858 UTC [1676] LOG: entering standby mode
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.881 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.202 UTC [1676] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.247 UTC [1676] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: startup process (PID 1676) exited with exit code 1
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.304 UTC [1673] LOG: database system is shut down
logger.go:42: 16:35:31 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:33 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:35:49 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 16m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 7m16s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 8m31s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 8m19s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 7m58s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 9m42s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 8m29s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 8m29s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 8m55s
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 15m
logger.go:42: 16:35:50 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 18m
logger.go:42: 16:35:51 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.317 UTC [1578] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.356 UTC [1578] LOG: database system is shut down
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.794 UTC [1638] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.795 UTC [1638] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.795 UTC [1638] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.822 UTC [1638] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.833 UTC [1641] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.856 UTC [1641] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.879 UTC [1641] LOG: entering standby mode
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.907 UTC [1641] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.229 UTC [1641] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.265 UTC [1641] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.267 UTC [1638] LOG: startup process (PID 1641) exited with exit code 1
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.267 UTC [1638] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.267 UTC [1638] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.306 UTC [1638] LOG: database system is shut down
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.830 UTC [1673] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.831 UTC [1673] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.831 UTC [1673] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.857 UTC [1673] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.868 UTC [1676] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.891 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.915 UTC [1676] LOG: entering standby mode
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.939 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.261 UTC [1676] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.299 UTC [1676] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.302 UTC [1673] LOG: startup process (PID 1676) exited with exit code 1
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.302 UTC [1673] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.302 UTC [1673] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:53 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.340 UTC [1673] LOG: database system is shut down
logger.go:42: 16:35:54 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.248 UTC [1610] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
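At this point both replicas (jjsw and l2dk) are in an identical restart loop while qt55 stays up, so the loop will not resolve on its own: each replica's data directory is pinned to the dead timeline. The usual way out is to rebuild the lagging members from the current leader. A hedged patronictl sketch follows; the Patroni cluster scope and the database container name are assumptions, so read them off the patronictl list output first.

    # List members and the cluster scope first (container name "database"
    # is an assumption about these instance pods).
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
        patronictl list
    # Then reinitialize one lagging member from the leader (this wipes and
    # re-copies its data directory; <cluster-name> comes from the output above):
    # patronictl reinit --force <cluster-name> major-upgrade-instance1-jjsw-0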
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:10.298 UTC [1610] LOG: database system is shut down
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.773 UTC [1673] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.774 UTC [1673] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.774 UTC [1673] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.801 UTC [1673] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.810 UTC [1676] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.836 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.858 UTC [1676] LOG: entering standby mode
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:29.881 UTC [1676] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.202 UTC [1676] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.247 UTC [1676] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: startup process (PID 1676) exited with exit code 1
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.252 UTC [1673] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:30.304 UTC [1673] LOG: database system is shut down
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.772 UTC [1717] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.773 UTC [1717] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.773 UTC [1717] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.799 UTC [1717] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.808 UTC [1720] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.831 UTC [1720] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.853 UTC [1720] LOG: entering standby mode
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:39.875 UTC [1720] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.196 UTC [1720] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.242 UTC [1720] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.245 UTC [1717] LOG: startup process (PID 1720) exited with exit code 1
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.245 UTC [1717] LOG: aborting startup due to startup process failure
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.245 UTC [1717] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.291 UTC [1717] LOG: database system is shut down
logger.go:42: 16:35:56 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:35:58 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:36:14 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | NAME READY STATUS RESTARTS AGE
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k 0/1 Completed 0 16m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v 0/1 Completed 0 16m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d 0/1 Completed 0 17m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2 0/1 Completed 0 7m41s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn 0/1 Error 0 8m56s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7 0/1 Error 0 8m44s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg 0/1 Error 0 8m23s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h 0/1 Completed 0 10m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0 3/4 Running 0 8m54s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0 3/4 Running 0 8m54s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0 4/4 Running 0 9m20s
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q 2/2 Running 0 15m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4 2/2 Running 0 15m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk 2/2 Running 0 15m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | major-upgrade-repo-host-0 2/2 Running 0 15m
logger.go:42: 16:36:15 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t 1/1 Running 0 19m
logger.go:42: 16:36:16 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.302 UTC [1673] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:40.340 UTC [1673] LOG: database system is shut down
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.815 UTC [1736] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.815 UTC [1736] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.815 UTC [1736] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.843 UTC [1736] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.854 UTC [1739] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.879 UTC [1739] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.900 UTC [1739] LOG: entering standby mode
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:35:59.924 UTC [1739] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.247 UTC [1739] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.283 UTC [1739] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.285 UTC [1736] LOG: startup process (PID 1739) exited with exit code 1
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.285 UTC [1736] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.285 UTC [1736] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.324 UTC [1736] LOG: database system is shut down
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.930 UTC [1771] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.931 UTC [1771] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.931 UTC [1771] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.967 UTC [1771] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.980 UTC [1774] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.021 UTC [1774] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.044 UTC [1774] LOG: entering standby mode
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.067 UTC [1774] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.391 UTC [1774] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.428 UTC [1774] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.430 UTC [1771] LOG: startup process (PID 1774) exited with exit code 1
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.430 UTC [1771] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.430 UTC [1771] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:18 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.470 UTC [1771] LOG: database system is shut down
logger.go:42: 16:36:19 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.237 UTC [1779] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
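The pg_waldump fatals in the qt55 tail above ("read 42 of 8192", "read 374 of 8192") are almost certainly noise rather than corruption: .history and .backup files are small text files, not 8192-byte-padded WAL pages, so pg_waldump always fails on them this way. Listing the archive through pgBackRest is the more useful check; a sketch follows, assuming a pgBackRest release recent enough to have repo-ls and the same repo-host container name as before.

    # List what is in the archive for stanza "db" instead of pointing
    # pg_waldump at non-WAL files (repo-ls exists in recent pgBackRest).
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
        pgbackrest --stanza=db repo-ls archive/db
    # pg_waldump is only meaningful on real WAL segments, e.g.:
    # pg_waldump /pgdata/pg14/pg_wal/000000040000000000000017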
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:00.287 UTC [1779] LOG: database system is shut down
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.765 UTC [1815] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.765 UTC [1815] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.765 UTC [1815] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.791 UTC [1815] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.800 UTC [1818] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.825 UTC [1818] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.847 UTC [1818] LOG: entering standby mode
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:09.869 UTC [1818] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.189 UTC [1818] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.242 UTC [1818] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.245 UTC [1815] LOG: startup process (PID 1818) exited with exit code 1
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.245 UTC [1815] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.245 UTC [1815] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.294 UTC [1815] LOG: database system is shut down
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.779 UTC [1850] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.780 UTC [1850] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.780 UTC [1850] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.805 UTC [1850] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.814 UTC [1853] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.838 UTC [1853] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.860 UTC [1853] LOG: entering standby mode
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.882 UTC [1853] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.203 UTC [1853] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.247 UTC [1853] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: startup process (PID 1853) exited with exit code 1
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.296 UTC [1850] LOG: database system is shut down
logger.go:42: 16:36:21 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:36:23 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:36:39 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
        kubectl -n ${NAMESPACE} get pod
        # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
        #   echo "${pod} logs:"
        #   kubectl -n ${NAMESPACE} logs ${pod}
        # done
        #
        for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
          phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
          if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
          fi
          echo "PostgreSQL logs from ${pod}:"
          echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
        done
        sleep 15]
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | NAME                                          READY   STATUS      RESTARTS   AGE
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k   0/1     Completed   0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v   0/1     Completed   0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d                         0/1     Completed   0          17m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2               0/1     Completed   0          8m6s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn               0/1     Error       0          9m21s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7               0/1     Error       0          9m9s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg               0/1     Error       0          8m48s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h               0/1     Completed   0          10m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0                3/4     Running     0          9m19s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0                3/4     Running     0          9m19s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0                4/4     Running     0          9m45s
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q       2/2     Running     0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4       2/2     Running     0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk       2/2     Running     0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | major-upgrade-repo-host-0                     2/2     Running     0          16m
logger.go:42: 16:36:40 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t                     1/1     Running     0          19m
logger.go:42: 16:36:41 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.430 UTC [1771] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.470 UTC [1771] LOG: database system is shut down
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.819 UTC [1824] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.820 UTC [1824] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.820 UTC [1824] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.847 UTC [1824] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.857 UTC [1827] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.880 UTC [1827] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.902 UTC [1827] LOG: entering standby mode
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.925 UTC [1827] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.248 UTC [1827] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.284 UTC [1827] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.287 UTC [1824] LOG: startup process (PID 1827) exited with exit code 1
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.287 UTC [1824] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.287 UTC [1824] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.326 UTC [1824] LOG: database system is shut down
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.811 UTC [1880] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.812 UTC [1880] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.812 UTC [1880] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.839 UTC [1880] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.850 UTC [1883] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.876 UTC [1883] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.902 UTC [1883] LOG: entering standby mode
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.927 UTC [1883] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.254 UTC [1883] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.292 UTC [1883] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: startup process (PID 1883) exited with exit code 1
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:43 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.337 UTC [1880] LOG: database system is shut down
logger.go:42: 16:36:44 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.245 UTC [1815] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
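
Note: the repeated FATAL above is the replicas' startup process refusing the archived timeline: each standby's control file still records a minimum recovery point (0/170000A0) on timeline 3, while the archive's latest history file promotes to timeline 4, so PostgreSQL aborts and the supervisor retries with a new postmaster PID each cycle. A minimal sketch for confirming the mismatch from the control file, assuming the PGO-style `database` container name and the /pgdata/pg14 data directory used elsewhere in this test:

    # compare the standby's recovery timeline against the FATAL above
    # (container name "database" and the data directory path are assumptions)
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-jjsw-0 -c database -- \
      pg_controldata /pgdata/pg14 | grep -iE 'timeline|minimum recovery'

If "Min recovery ending loc's timeline" prints 3 while the newest archived history file is 00000004.history, startup will keep failing exactly as logged.
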
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:10.294 UTC [1815] LOG: database system is shut down
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.779 UTC [1850] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.780 UTC [1850] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.780 UTC [1850] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.805 UTC [1850] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.814 UTC [1853] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.838 UTC [1853] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.860 UTC [1853] LOG: entering standby mode
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:19.882 UTC [1853] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.203 UTC [1853] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.247 UTC [1853] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: startup process (PID 1853) exited with exit code 1
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.296 UTC [1850] LOG: database system is shut down
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.757 UTC [1920] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.758 UTC [1920] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.758 UTC [1920] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.784 UTC [1920] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.793 UTC [1923] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.817 UTC [1923] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.838 UTC [1923] LOG: entering standby mode
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.860 UTC [1923] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.181 UTC [1923] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.219 UTC [1923] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: startup process (PID 1923) exited with exit code 1
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: aborting startup due to startup process failure
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.286 UTC [1920] LOG: database system is shut down
logger.go:42: 16:36:46 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
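
Note: this repeated ERROR [103] block is pgBackRest's archive check failing because repo1 holds no archive.info for the stanza. The hints map to ordinary pgBackRest commands; a diagnostic sketch for running them by hand from the repo host, assuming the stanza name `db` visible in the paths above and a `pgbackrest` container on the repo-host pod (both assumptions):

    # inspect what the repository currently knows about the stanza
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
      pgbackrest info --stanza=db

    # the "has a stanza-create been performed?" hint corresponds to:
    kubectl -n ${NAMESPACE} exec major-upgrade-repo-host-0 -c pgbackrest -- \
      pgbackrest stanza-create --stanza=db

In this operator the stanza is normally (re)created by the reconciler, so these commands are for inspection rather than part of the test flow.
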
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:36:48 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:37:04 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
        kubectl -n ${NAMESPACE} get pod
        # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
        #   echo "${pod} logs:"
        #   kubectl -n ${NAMESPACE} logs ${pod}
        # done
        #
        for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
          phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
          if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
          fi
          echo "PostgreSQL logs from ${pod}:"
          echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
        done
        sleep 15]
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | NAME                                          READY   STATUS      RESTARTS   AGE
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k   0/1     Completed   0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v   0/1     Completed   0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d                         0/1     Completed   0          17m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2               0/1     Completed   0          8m31s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn               0/1     Error       0          9m46s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7               0/1     Error       0          9m34s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg               0/1     Error       0          9m13s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h               0/1     Completed   0          10m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0                3/4     Running     0          9m44s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0                3/4     Running     0          9m44s
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0                4/4     Running     0          10m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q       2/2     Running     0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4       2/2     Running     0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk       2/2     Running     0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | major-upgrade-repo-host-0                     2/2     Running     0          16m
logger.go:42: 16:37:05 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t                     1/1     Running     0          20m
logger.go:42: 16:37:06 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.287 UTC [1824] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.326 UTC [1824] LOG: database system is shut down
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.811 UTC [1880] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.812 UTC [1880] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.812 UTC [1880] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.839 UTC [1880] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.850 UTC [1883] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.876 UTC [1883] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.902 UTC [1883] LOG: entering standby mode
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.927 UTC [1883] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.254 UTC [1883] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.292 UTC [1883] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: startup process (PID 1883) exited with exit code 1
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.337 UTC [1880] LOG: database system is shut down
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.857 UTC [1940] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.870 UTC [1943] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.894 UTC [1943] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.918 UTC [1943] LOG: entering standby mode
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.942 UTC [1943] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.265 UTC [1943] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.302 UTC [1943] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: startup process (PID 1943) exited with exit code 1
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.346 UTC [1940] LOG: database system is shut down
logger.go:42: 16:37:08 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.249 UTC [1850] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
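
Note: the recurring `pg_waldump: fatal: ... read 42 of 8192` and `read 374 of 8192` lines in the tails above are not WAL corruption. "00000004.history" and "000000040000000000000019.00000028.backup" are small plain-text files, while pg_waldump expects full 8192-byte WAL pages, so the short read is inevitable. Reading the file directly is enough; a sketch (the `database` container name and the pg_wal path under /pgdata/pg14 are assumptions):

    # .history and .backup files are plain text; view them directly instead of pg_waldump
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
      cat /pgdata/pg14/pg_wal/00000004.history
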
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:20.296 UTC [1850] LOG: database system is shut down
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.757 UTC [1920] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.758 UTC [1920] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.758 UTC [1920] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.784 UTC [1920] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.793 UTC [1923] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.817 UTC [1923] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.838 UTC [1923] LOG: entering standby mode
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:39.860 UTC [1923] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.181 UTC [1923] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.219 UTC [1923] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: startup process (PID 1923) exited with exit code 1
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.286 UTC [1920] LOG: database system is shut down
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.774 UTC [1983] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.775 UTC [1983] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.775 UTC [1983] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.801 UTC [1983] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.810 UTC [1986] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.835 UTC [1986] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.856 UTC [1986] LOG: entering standby mode
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.879 UTC [1986] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.201 UTC [1986] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.238 UTC [1986] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: startup process (PID 1986) exited with exit code 1
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:10 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.298 UTC [1983] LOG: database system is shut down
logger.go:42: 16:37:11 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:37:13 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:37:29 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
        kubectl -n ${NAMESPACE} get pod
        # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
        #   echo "${pod} logs:"
        #   kubectl -n ${NAMESPACE} logs ${pod}
        # done
        #
        for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
          phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
          if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
          fi
          echo "PostgreSQL logs from ${pod}:"
          echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
        done
        sleep 15]
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | NAME                                          READY   STATUS      RESTARTS   AGE
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k   0/1     Completed   0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v   0/1     Completed   0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d                         0/1     Completed   0          18m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2               0/1     Completed   0          8m56s
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn               0/1     Error       0          10m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7               0/1     Error       0          9m59s
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg               0/1     Error       0          9m38s
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h               0/1     Completed   0          11m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0                3/4     Running     0          10m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0                3/4     Running     0          10m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0                4/4     Running     0          10m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q       2/2     Running     0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4       2/2     Running     0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk       2/2     Running     0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | major-upgrade-repo-host-0                     2/2     Running     0          17m
logger.go:42: 16:37:30 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t                     1/1     Running     0          20m
logger.go:42: 16:37:31 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.294 UTC [1880] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.337 UTC [1880] LOG: database system is shut down
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.830 UTC [1940] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.857 UTC [1940] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.870 UTC [1943] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.894 UTC [1943] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.918 UTC [1943] LOG: entering standby mode
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.942 UTC [1943] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.265 UTC [1943] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.302 UTC [1943] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: startup process (PID 1943) exited with exit code 1
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.305 UTC [1940] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.346 UTC [1940] LOG: database system is shut down
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.825 UTC [2001] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.825 UTC [2001] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.825 UTC [2001] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.852 UTC [2001] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.862 UTC [2004] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.885 UTC [2004] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.908 UTC [2004] LOG: entering standby mode
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.931 UTC [2004] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.255 UTC [2004] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.292 UTC [2004] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.294 UTC [2001] LOG: startup process (PID 2004) exited with exit code 1
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.294 UTC [2001] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.294 UTC [2001] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:33 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.334 UTC [2001] LOG: database system is shut down
logger.go:42: 16:37:34 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.222 UTC [1920] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
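
Note: each replica repeats the same start / FATAL / shutdown cycle with a fresh postmaster PID (1880, 1940, 2001, ... on jjsw-0), i.e. the cluster supervisor keeps restarting PostgreSQL roughly every twenty seconds and hits the identical timeline error. A quick way to watch member state while this loops, assuming patronictl is usable inside the `database` container as in PGO-based images (an assumption):

    # show Patroni's view of members, roles, and timelines during the crash loop
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
      patronictl list
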
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:40.286 UTC [1920] LOG: database system is shut down
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.774 UTC [1983] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.775 UTC [1983] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.775 UTC [1983] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.801 UTC [1983] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.810 UTC [1986] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.835 UTC [1986] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.856 UTC [1986] LOG: entering standby mode
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:36:59.879 UTC [1986] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.201 UTC [1986] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.238 UTC [1986] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: startup process (PID 1986) exited with exit code 1
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.241 UTC [1983] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:00.298 UTC [1983] LOG: database system is shut down
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.794 UTC [2046] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.794 UTC [2046] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.794 UTC [2046] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.821 UTC [2046] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.831 UTC [2049] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.855 UTC [2049] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.878 UTC [2049] LOG: entering standby mode
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:19.902 UTC [2049] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.223 UTC [2049] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.262 UTC [2049] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.265 UTC [2046] LOG: startup process (PID 2049) exited with exit code 1
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.265 UTC [2046] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.265 UTC [2046] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:35 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.312 UTC [2046] LOG: database system is shut down
logger.go:42: 16:37:36 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:37:38 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:37:54 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
        kubectl -n ${NAMESPACE} get pod
        # for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
        #   echo "${pod} logs:"
        #   kubectl -n ${NAMESPACE} logs ${pod}
        # done
        #
        for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
          phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
          if [[ "${phase}" != "Running" ]]; then
            echo "Waiting for ${pod} to start running"
            continue
          fi
          echo "PostgreSQL logs from ${pod}:"
          echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
            | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
        done
        sleep 15]
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | NAME                                          READY   STATUS      RESTARTS   AGE
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k   0/1     Completed   0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v   0/1     Completed   0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d                         0/1     Completed   0          18m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2               0/1     Completed   0          9m20s
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn               0/1     Error       0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7               0/1     Error       0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg               0/1     Error       0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h               0/1     Completed   0          11m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0                3/4     Running     0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0                3/4     Running     0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0                4/4     Running     0          10m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q       2/2     Running     0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4       2/2     Running     0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk       2/2     Running     0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | major-upgrade-repo-host-0                     2/2     Running     0          17m
logger.go:42: 16:37:55 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t                     1/1     Running     0          21m
logger.go:42: 16:37:56 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.294 UTC [2001] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:20.334 UTC [2001] LOG: database system is shut down
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.815 UTC [2071] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.816 UTC [2071] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.816 UTC [2071] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.843 UTC [2071] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.854 UTC [2074] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.877 UTC [2074] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.899 UTC [2074] LOG: entering standby mode
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:39.922 UTC [2074] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.247 UTC [2074] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.284 UTC [2074] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.286 UTC [2071] LOG: startup process (PID 2074) exited with exit code 1
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.286 UTC [2071] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.286 UTC [2071] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.325 UTC [2071] LOG: database system is shut down
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.806 UTC [2106] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.807 UTC [2106] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.807 UTC [2106] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.835 UTC [2106] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.846 UTC [2109] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.869 UTC [2109] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.891 UTC [2109] LOG: entering standby mode
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.914 UTC [2109] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.237 UTC [2109] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.273 UTC [2109] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.275 UTC [2106] LOG: startup process (PID 2109) exited with exit code 1
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.275 UTC [2106] LOG: aborting startup due to startup process failure
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.275 UTC [2106] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:37:58 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.315 UTC [2106] LOG: database system is shut down
logger.go:42: 16:37:59 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.242 UTC [2110] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:40.299 UTC [2110] LOG: database system is shut down
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.792 UTC [2156] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.793 UTC [2156] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.793 UTC [2156] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.798 UTC [2156] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.807 UTC [2159] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.831 UTC [2159] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.853 UTC [2159] LOG: entering standby mode
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:49.875 UTC [2159] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.196 UTC [2159] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.234 UTC [2159] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.237 UTC [2156] LOG: startup process (PID 2159) exited with exit code 1
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.237 UTC [2156] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.237 UTC [2156] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.295 UTC [2156] LOG: database system is shut down
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.800 UTC [2191] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.810 UTC [2194] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.833 UTC [2194] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.855 UTC [2194] LOG: entering standby mode
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.877 UTC [2194] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.198 UTC [2194] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.236 UTC [2194] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: startup process (PID 2194) exited with exit code 1
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:00 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.295 UTC [2191] LOG: database system is shut down
logger.go:42: 16:38:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:01 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:38:03 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
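Two notes on this block. The pgBackRest ERROR entries (16:24-16:25) predate the restore step: repo1 had no archive.info yet, which is exactly what a missing stanza-create produces. The pg_waldump failures are an artifact of pointing pg_waldump at non-WAL files: a .history file (42 bytes here) and a .backup label (374 bytes) are short text files, not 8192-byte-paged WAL segments, hence "read 42 of 8192". Hypothetical follow-up commands, assuming the stanza name db implied by the /pgbackrest/repo1/archive/db/ paths above:

    # From a container with pgbackrest configured (repo host or database pod):
    pgbackrest --stanza=db info    # is archive/backup info present in repo1?
    pgbackrest --stanza=db check   # re-run the archive check the HINTs refer to
    # pg_waldump works on real WAL segments; -t selects the timeline:
    /usr/pgsql-14/bin/pg_waldump -p /pgdata/pg14_wal -t 4 000000040000000000000017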
logger.go:42: 16:38:19 | major-upgrade/28-run-restore | running command: [sh -c set -o errexit
kubectl -n ${NAMESPACE} get pod
# for pod in $(kubectl -n ${NAMESPACE} get pod -l job-name=major-upgrade-pgbackrest-restore | awk '{print $1}'); do
#   echo "${pod} logs:"
#   kubectl -n ${NAMESPACE} logs ${pod}
# done
#
for pod in $(kubectl -n ${NAMESPACE} get pods -l postgres-operator.crunchydata.com/data=postgres --no-headers | awk '{print $1}'); do
  phase=$(kubectl -n ${NAMESPACE} get pod/${pod} -o jsonpath={".status.phase"})
  if [[ "${phase}" != "Running" ]]; then
    echo "Waiting for ${pod} to start running"
    continue
  fi
  echo "PostgreSQL logs from ${pod}:"
  echo "find /pgdata/pg14/log -type f -iname 'postgresql*.log' -exec tail -n 30 {} \;" \
    | kubectl -n ${NAMESPACE} exec -it ${pod} -- bash 2>/dev/null
done
sleep 15]
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | NAME                                          READY   STATUS      RESTARTS   AGE
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-jjsw-2pj2k   0/1     Completed   0          18m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | 13-to-14-major-upgrade-instance1-l2dk-llh9v   0/1     Completed   0          18m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | 13-to-14-pgdata-p7h2d                         0/1     Completed   0          19m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-46cl2               0/1     Completed   0          9m45s
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-t6tbn               0/1     Error       0          11m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-vlss7               0/1     Error       0          10m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-backup-4qds-w6pxg               0/1     Error       0          10m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-backup-w5f8-tn58h               0/1     Completed   0          12m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-instance1-jjsw-0                3/4     Running     0          10m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-instance1-l2dk-0                3/4     Running     0          10m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-instance1-qt55-0                4/4     Running     0          11m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-2272q       2/2     Running     0          17m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-gj9w4       2/2     Running     0          17m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-pgbouncer-79b44c76f-hzgzk       2/2     Running     0          17m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | major-upgrade-repo-host-0                     2/2     Running     0          17m
logger.go:42: 16:38:20 | major-upgrade/28-run-restore | pg-client-b7cfff86c-84f6t                     1/1     Running     0          21m
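In this listing, READY 3/4 on major-upgrade-instance1-jjsw-0 and -l2dk-0 means one container in each pod is not ready; given the startup-failure loop above, that is the database container, while qt55-0 (4/4) is the current leader. A hypothetical one-liner to confirm which container is the unready one:

    kubectl -n ${NAMESPACE} get pod major-upgrade-instance1-jjsw-0 \
        -o jsonpath='{range .status.containerStatuses[*]}{.name}{"="}{.ready}{"\n"}{end}'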
logger.go:42: 16:38:21 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-jjsw-0:
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.275 UTC [2106] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.315 UTC [2106] LOG: database system is shut down
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.833 UTC [2149] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.834 UTC [2149] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.834 UTC [2149] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.860 UTC [2149] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.871 UTC [2152] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.894 UTC [2152] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.920 UTC [2152] LOG: entering standby mode
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.944 UTC [2152] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.267 UTC [2152] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.303 UTC [2152] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.305 UTC [2149] LOG: startup process (PID 2152) exited with exit code 1
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.305 UTC [2149] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.305 UTC [2149] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.345 UTC [2149] LOG: database system is shut down
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.822 UTC [2202] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.823 UTC [2202] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.823 UTC [2202] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.850 UTC [2202] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.861 UTC [2205] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.883 UTC [2205] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.905 UTC [2205] LOG: entering standby mode
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.927 UTC [2205] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.249 UTC [2205] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.285 UTC [2205] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.287 UTC [2202] LOG: startup process (PID 2205) exited with exit code 1
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.288 UTC [2202] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.288 UTC [2202] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.327 UTC [2202] LOG: database system is shut down
logger.go:42: 16:38:23 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-l2dk-0:
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.237 UTC [2156] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:50.295 UTC [2156] LOG: database system is shut down
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.774 UTC [2191] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.800 UTC [2191] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.810 UTC [2194] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.833 UTC [2194] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.855 UTC [2194] LOG: entering standby mode
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:37:59.877 UTC [2194] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.198 UTC [2194] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.236 UTC [2194] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: startup process (PID 2194) exited with exit code 1
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.239 UTC [2191] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:00.295 UTC [2191] LOG: database system is shut down
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.773 UTC [2254] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.773 UTC [2254] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.773 UTC [2254] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.799 UTC [2254] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.808 UTC [2257] LOG: database system was shut down in recovery at 2025-04-11 16:26:34 UTC
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.832 UTC [2257] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.853 UTC [2257] LOG: entering standby mode
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:19.875 UTC [2257] LOG: restored log file "00000004.history" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.196 UTC [2257] LOG: restored log file "000000040000000000000017" from archive
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.233 UTC [2257] FATAL: requested timeline 4 does not contain minimum recovery point 0/170000A0 on timeline 3
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.236 UTC [2254] LOG: startup process (PID 2257) exited with exit code 1
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.236 UTC [2254] LOG: aborting startup due to startup process failure
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.236 UTC [2254] LOG: [pg_stat_monitor] pgsm_shmem_shutdown: Shutdown initiated.
logger.go:42: 16:38:25 | major-upgrade/28-run-restore | 2025-04-11 16:38:20.295 UTC [2254] LOG: database system is shut down
logger.go:42: 16:38:26 | major-upgrade/28-run-restore | PostgreSQL logs from major-upgrade-instance1-qt55-0:
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:24:34.315 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:25:34.288 P00 ERROR: [103]: unable to find a valid repository:
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | repo1: [FileMissingError] unable to load info file '/pgbackrest/repo1/archive/db/archive.info' or '/pgbackrest/repo1/archive/db/archive.info.copy':
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info' for read
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | FileMissingError: raised from remote-0 tls protocol on 'major-upgrade-repo-host-0.major-upgrade-pods.kuttl-test-mutual-maggot.svc.cluster.local.': unable to open missing file '/pgbackrest/repo1/archive/db/archive.info.copy' for read
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: archive.info cannot be opened but is required to push/get WAL segments.
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: is archive_command configured correctly in postgresql.conf?
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: has a stanza-create been performed?
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] LOG: restore point "pgBackRest Archive Check" created at 0/14000140
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:26:10.598 UTC [1350] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.045 UTC [19] LOG: starting PostgreSQL 14.17 - Percona Distribution on x86_64-pc-linux-gnu, compiled by gcc (GCC) 14.2.1 20240801 (Red Hat 14.2.1-1), 64-bit
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv4 address "0.0.0.0", port 5432
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.046 UTC [19] LOG: listening on IPv6 address "::", port 5432
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.050 UTC [19] LOG: listening on Unix socket "/tmp/postgres/.s.PGSQL.5432"
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.058 UTC [22] LOG: database system was shut down at 2025-04-11 16:26:49 UTC
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:17.074 UTC [19] LOG: database system is ready to accept connections
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "00000004.history": read 42 of 8192
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:27:18.061 UTC [19] LOG: received SIGHUP, reloading configuration files
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] LOG: restore point "pgBackRest Archive Check" created at 0/19000140
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | 2025-04-11 16:28:39.916 UTC [513] STATEMENT: select pg_catalog.pg_create_restore_point('pgBackRest Archive Check')::text
logger.go:42: 16:38:28 | major-upgrade/28-run-restore | pg_waldump: fatal: could not read file "000000040000000000000019.00000028.backup": read 374 of 8192
logger.go:42: 16:38:31 | major-upgrade/28-run-restore | test step failed 28-run-restore
logger.go:42: 16:38:31 | major-upgrade/28-run-restore | collecting log output for [type==pod,label: postgres-operator.crunchydata.com/data=postgres]
logger.go:42: 16:38:31 | major-upgrade/28-run-restore | running command: [kubectl logs --prefix -l postgres-operator.crunchydata.com/data=postgres -n kuttl-test-mutual-maggot --all-containers --tail=30]
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/nss-wrapper-init] nss_wrapper: user exists
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/nss-wrapper-init] nss_wrapper: group exists
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/nss-wrapper-init] nss_wrapper: environment configured
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/nss-wrapper-init] nss_wrapper: user exists
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/nss-wrapper-init] nss_wrapper: group exists
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/nss-wrapper-init] nss_wrapper: environment configured
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + IFS=,
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + read -ra extensions
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + args=('-type' 's3' '-region' 'eu-central-1' '-bucket' 'pg-extensions' '-extension-path' '/pgdata/extension/14')
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + declare -a args
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + [[ -n '' ]]
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + for installed in "${PGDATA_EXTENSIONS}"/*.installed
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] ++ basename -- '/pgdata/extension/14/*.installed'
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + filename='*.installed'
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + key='*'
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + [[ * == \* ]]
logger.go:42: 16:38:32 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/extension-installer-14] + continue
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] Initializing ...
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: uid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: gid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: postgres path::/usr/pgsql-14/bin/postgres
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: postgres version::postgres (PostgreSQL) 14.17 - Percona Distribution
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: config directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: data directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: bootstrap directory::/pgdata/pg14_bootstrap
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: pgBackRest log directory::/pgdata/pgbackrest/log
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: data version::14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/postgres-startup] ::postgres-operator: wal directory::/pgdata/pg14_wal
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] + CRUNCHY_BINDIR=/opt/crunchy
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/pgbackrest /opt/crunchy/bin/pgbackrest
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-entrypoint.sh /opt/crunchy/bin/postgres-entrypoint.sh
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-liveness-check.sh /opt/crunchy/bin/postgres-liveness-check.sh
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-readiness-check.sh /opt/crunchy/bin/postgres-readiness-check.sh
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] Initializing ...
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: uid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: gid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: postgres path::/usr/pgsql-14/bin/postgres
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: postgres version::postgres (PostgreSQL) 14.17 - Percona Distribution
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: config directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: data directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: pgBackRest log directory::/pgdata/pgbackrest/log
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: data version::14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/postgres-startup] ::postgres-operator: wal directory::/pgdata/pg14_wal
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + IFS=,
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + read -ra extensions
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + args=('-type' 's3' '-region' 'eu-central-1' '-bucket' 'pg-extensions' '-extension-path' '/pgdata/extension/14')
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + declare -a args
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + [[ -n '' ]]
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + for installed in "${PGDATA_EXTENSIONS}"/*.installed
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] ++ basename -- '/pgdata/extension/14/*.installed'
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + filename='*.installed'
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + key='*'
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + [[ * == \* ]]
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/extension-installer-14] + continue
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/nss-wrapper-init] nss_wrapper: user exists
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/nss-wrapper-init] nss_wrapper: group exists
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/nss-wrapper-init] nss_wrapper: environment configured
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] Initializing ...
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: uid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: gid::26
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: postgres path::/usr/pgsql-14/bin/postgres
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: postgres version::postgres (PostgreSQL) 14.17 - Percona Distribution
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: config directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: data directory::/pgdata/pg14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: pgBackRest log directory::/pgdata/pgbackrest/log
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: data version::14
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/postgres-startup] ::postgres-operator: wal directory::/pgdata/pg14_wal
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/pgbackrest] P00 INFO: server command begin 2.54.2: --exec-id=1-2fbfdc86 --log-level-console=detail --log-level-file=off --log-level-stderr=error --log-path=/pgdata/pgbackrest/log --no-log-timestamp --tls-server-address=0.0.0.0 --tls-server-auth=pgbackrest@a40c24af-348e-460f-84cf-58362556a499=* --tls-server-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --tls-server-cert-file=/etc/pgbackrest/server/server-tls.crt --tls-server-key-file=/etc/pgbackrest/server/server-tls.key
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] + CRUNCHY_BINDIR=/opt/crunchy
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/pgbackrest /opt/crunchy/bin/pgbackrest
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-entrypoint.sh /opt/crunchy/bin/postgres-entrypoint.sh
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-liveness-check.sh /opt/crunchy/bin/postgres-liveness-check.sh
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -u
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] ++ id -g
logger.go:42: 16:38:33 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-readiness-check.sh /opt/crunchy/bin/postgres-readiness-check.sh
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:33:39,244 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:33:49,147 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:33:59,094 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:09,082 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:19,092 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:29,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:39,078 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:49,078 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:34:59,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:09,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:19,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:29,076 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:39,082 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:49,077 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:35:59,088 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:09,082 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:19,091 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:29,089 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:39,085 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:49,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:36:59,088 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:09,082 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:19,096 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:29,078 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:39,079 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:49,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:37:59,096 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:38:09,081 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:38:19,078 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-qt55-0/database] 2025-04-11 16:38:29,079 INFO: no action. I am (major-upgrade-instance1-qt55-0), the leader with the lock
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] wal_level setting: logical
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] wal_log_hints setting: on
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] max_connections setting: 100
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] max_worker_processes setting: 8
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] max_wal_senders setting: 10
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] max_prepared_xacts setting: 0
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] max_locks_per_xact setting: 64
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] track_commit_timestamp setting: on
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Maximum data alignment: 8
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Database block size: 8192
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Blocks per segment of large relation: 131072
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] WAL block size: 8192
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Bytes per WAL segment: 16777216
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Maximum length of identifiers: 64
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Maximum columns in an index: 32
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Maximum size of a TOAST chunk: 1996
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Size of a large-object chunk: 2048
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Date/time type storage: 64-bit integers
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Float8 argument passing: by value
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Data page checksum version: 1
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] Mock authentication nonce: 6cdc53f6d8513000d07e41a9596e5d8886e6de855a6dc5275fa46815a52188e9
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database]
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29,087 INFO: Lock owner: major-upgrade-instance1-qt55-0; I am major-upgrade-instance1-jjsw-0
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29,088 INFO: starting as a secondary
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29.577 UTC [2246] LOG: pgaudit extension initialized
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29,598 INFO: postmaster pid=2246
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] /tmp/postgres:5432 - no response
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29.800 UTC [2246] LOG: redirecting log output to logging collector process
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:29.800 UTC [2246] HINT: Future log output will appear in directory "log".
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/database] 2025-04-11 16:38:30,612 ERROR: postmaster is not running
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] wal_level setting: logical
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] wal_log_hints setting: on
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] max_connections setting: 100
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] max_worker_processes setting: 8
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] max_wal_senders setting: 10
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] max_prepared_xacts setting: 0
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] max_locks_per_xact setting: 64
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] track_commit_timestamp setting: on
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Maximum data alignment: 8
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Database block size: 8192
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Blocks per segment of large relation: 131072
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] WAL block size: 8192
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Bytes per WAL segment: 16777216
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Maximum length of identifiers: 64
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Maximum columns in an index: 32
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Maximum size of a TOAST chunk: 1996
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Size of a large-object chunk: 2048
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Date/time type storage: 64-bit integers
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Float8 argument passing: by value
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Data page checksum version: 1
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] Mock authentication nonce: 6cdc53f6d8513000d07e41a9596e5d8886e6de855a6dc5275fa46815a52188e9
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database]
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29,086 INFO: Lock owner: major-upgrade-instance1-qt55-0; I am major-upgrade-instance1-l2dk-0
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29,086 INFO: starting as a secondary
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29.546 UTC [2297] LOG: pgaudit extension initialized
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29,561 INFO: postmaster pid=2297
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] /tmp/postgres:5432 - no response
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29.762 UTC [2297] LOG: redirecting log output to logging collector process
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:29.762 UTC [2297] HINT: Future log output will appear in directory "log".
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database] 2025-04-11 16:38:30,571 ERROR: postmaster is not running
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + IFS=,
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + read -ra extensions
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + args=('-type' 's3' '-region' 'eu-central-1' '-bucket' 'pg-extensions' '-extension-path' '/pgdata/extension/14')
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + declare -a args
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + [[ -n '' ]]
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + for installed in "${PGDATA_EXTENSIONS}"/*.installed
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] ++ basename -- '/pgdata/extension/14/*.installed'
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + filename='*.installed'
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + key='*'
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + [[ * == \* ]]
logger.go:42: 16:38:34 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/extension-installer-14] + continue
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/pgbackrest] P00 INFO: server command begin 2.54.2: --exec-id=1-3d0807ee --log-level-console=detail --log-level-file=off --log-level-stderr=error --log-path=/pgdata/pgbackrest/log --no-log-timestamp --tls-server-address=0.0.0.0 --tls-server-auth=pgbackrest@a40c24af-348e-460f-84cf-58362556a499=* --tls-server-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --tls-server-cert-file=/etc/pgbackrest/server/server-tls.crt --tls-server-key-file=/etc/pgbackrest/server/server-tls.key
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-jjsw-0/pgbackrest] P00 INFO: server command begin 2.54.2: --exec-id=1-9a811c0b --log-level-console=detail --log-level-file=off --log-level-stderr=error --log-path=/pgdata/pgbackrest/log --no-log-timestamp --tls-server-address=0.0.0.0 --tls-server-auth=pgbackrest@a40c24af-348e-460f-84cf-58362556a499=* --tls-server-ca-file=/etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt --tls-server-cert-file=/etc/pgbackrest/server/server-tls.crt --tls-server-key-file=/etc/pgbackrest/server/server-tls.key
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] + CRUNCHY_BINDIR=/opt/crunchy
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -u
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -g
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/pgbackrest /opt/crunchy/bin/pgbackrest
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -u
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -g
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-entrypoint.sh /opt/crunchy/bin/postgres-entrypoint.sh
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -u
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -g
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-liveness-check.sh /opt/crunchy/bin/postgres-liveness-check.sh
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -u
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] ++ id -g
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | [pod/major-upgrade-instance1-l2dk-0/database-init] + install -o 2 -g 2 -m 0755 -D /usr/local/bin/postgres-readiness-check.sh /opt/crunchy/bin/postgres-readiness-check.sh
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | collecting log output for [type==pod,label: job-name=major-upgrade-pgbackrest-restore]
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | running command: [kubectl logs --prefix -l job-name=major-upgrade-pgbackrest-restore -n kuttl-test-mutual-maggot --all-containers --tail=300]
logger.go:42: 16:38:35 | major-upgrade/28-run-restore | No resources found in kuttl-test-mutual-maggot namespace.
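The picture from the collected logs: Patroni on qt55-0 keeps reporting "no action. I am ... the leader with the lock", while jjsw-0 and l2dk-0 start as secondaries, hit the timeline FATAL, and end with "postmaster is not running". If one were debugging this cluster interactively, the member state could be checked, and a diverged replica rebuilt from the leader, roughly like this (hypothetical; the Patroni scope name major-upgrade-ha is an assumption, and reinit wipes that member's data directory):

    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- patronictl list
    # Re-image one diverged replica from the leader (scope name assumed):
    kubectl -n ${NAMESPACE} exec major-upgrade-instance1-qt55-0 -c database -- \
        patronictl reinit --force major-upgrade-ha major-upgrade-instance1-jjsw-0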
case.go:378: failed in step 28-run-restore
case.go:380: --- PerconaPGCluster:kuttl-test-mutual-maggot/major-upgrade
+++ PerconaPGCluster:kuttl-test-mutual-maggot/major-upgrade
@@ -1,18 +1,32 @@
 apiVersion: pgv2.percona.com/v2
 kind: PerconaPGCluster
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"pgv2.percona.com/v2","kind":"PerconaPGCluster","metadata":{"annotations":{},"labels":{"e2e":"major-upgrade"},"name":"major-upgrade","namespace":"kuttl-test-mutual-maggot"},"spec":{"backups":{"pgbackrest":{"image":"perconalab/percona-postgresql-operator:main-ppg13-pgbackrest","manual":{"options":["--type=full"],"repoName":"repo1"},"repoHost":{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/data":"pgbackrest"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}}},"repos":[{"name":"repo1","schedules":{"full":"0 0 * * 6"},"volume":{"volumeClaimSpec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}}]}},"crVersion":"2.7.0","extensions":{"image":"perconalab/percona-postgresql-operator:PR-988-1f8703f6e","imagePullPolicy":"Always","storage":{"bucket":"pg-extensions","region":"eu-central-1","secret":{"name":"aws-s3-secret"},"type":"s3"}},"image":"perconalab/percona-postgresql-operator:main-ppg13-postgres","imagePullPolicy":"Always","initContainer":{"image":"perconalab/percona-postgresql-operator:K8SPG-708-12"},"instances":[{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/data":"postgres"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}},"dataVolumeClaimSpec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"3Gi"}}},"name":"instance1","replicas":3}],"patroni":{"createReplicaMethods":["basebackup","pgbackrest"]},"pmm":{"customClusterName":"major-upgrade-pmm-custom-name","enabled":false,"image":"perconalab/pmm-client:dev-latest","postgresParams":"--environment=dev-postgres","secret":"major-upgrade-pmm-secret","serverHost":"monitoring-service"},"postgresVersion":13,"proxy":{"pgBouncer":{"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"postgres-operator.crunchydata.com/role":"pgbouncer"}},"topologyKey":"kubernetes.io/hostname"},"weight":1}]}},"image":"perconalab/percona-postgresql-operator:main-ppg13-pgbouncer","replicas":3}},"users":[{"name":"postgres","password":{"type":"AlphaNumeric"}},{"name":"major-upgrade","password":{"type":"AlphaNumeric"}}]}}
+  finalizers:
+  - internal.percona.com/stop-watchers
+  labels:
+    e2e: major-upgrade
+  managedFields: '[... elided field over 10 lines long ...]'
   name: major-upgrade
   namespace: kuttl-test-mutual-maggot
+spec: '[... elided field over 10 lines long ...]'
 status:
+  host: major-upgrade-pgbouncer.kuttl-test-mutual-maggot.svc
+  installedCustomExtensions: []
+  patroniVersion: 4.0.4
   pgbouncer:
     ready: 3
     size: 3
   postgres:
+    imageID: docker.io/perconalab/percona-postgresql-operator@sha256:4d7d831ad04d1154f41b8c11765c81f56a531e07280a1a8e86b0b4e41c4eb321
     instances:
     - name: instance1
-      ready: 3
+      ready: 1
       size: 3
-    ready: 3
+    ready: 1
     size: 3
-  state: ready
+    version: 14
+  state: initializing
case.go:380: resource PerconaPGCluster:kuttl-test-mutual-maggot/major-upgrade: .status.state: value mismatch, expected: ready != actual: initializing
case.go:380: command "kubectl -n ${NAMESPACE} get pod\\n for pod in $(kubectl -n ${NAMESPA..." exceeded 12 sec timeout, context deadline exceeded
major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Started Started container patroni-version-check kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:27 +0000 UTC Normal Pod major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Killing Stopping container patroni-version-check kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-jjsw-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-l2dk-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-l2dk-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-l2dk-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-mutual-maggot/major-upgrade-instance1-l2dk-pgdata" pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-l2dk SuccessfulCreate create Pod major-upgrade-instance1-l2dk-0 in StatefulSet major-upgrade-instance1-l2dk successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:28 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-qt55-pgdata WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-jjsw-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-jjsw-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-mutual-maggot/major-upgrade-instance1-jjsw-pgdata" pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-jjsw SuccessfulCreate create Pod major-upgrade-instance1-jjsw-0 in StatefulSet major-upgrade-instance1-jjsw successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-qt55-pgdata ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. 
If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-qt55-pgdata Provisioning External provisioner is provisioning volume for claim "kuttl-test-mutual-maggot/major-upgrade-instance1-qt55-pgdata" pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-qt55 SuccessfulCreate create Pod major-upgrade-instance1-qt55-0 in StatefulSet major-upgrade-instance1-qt55 successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PersistentVolumeClaim major-upgrade-repo1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:29 +0000 UTC Normal PostgresCluster.postgres-operator.crunchydata.com major-upgrade RepoHostCreated created pgBackRest repository host StatefulSet/major-upgrade-repo-host postgrescluster-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Normal PodDisruptionBudget.policy major-upgrade-pgbouncer NoPods No matching pods found controllermanager logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Normal Deployment.apps major-upgrade-pgbouncer ScalingReplicaSet Scaled up replica set major-upgrade-pgbouncer-6c886fdbdc to 3 deployment-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Warning StatefulSet.apps major-upgrade-repo-host FailedCreate create Pod major-upgrade-repo-host-0 in StatefulSet major-upgrade-repo-host failed error: pods "major-upgrade-repo-host-0" is forbidden: error looking up service account kuttl-test-mutual-maggot/major-upgrade-pgbackrest: serviceaccount "major-upgrade-pgbackrest" not found statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Normal StatefulSet.apps major-upgrade-repo-host SuccessfulCreate create Pod major-upgrade-repo-host-0 in StatefulSet major-upgrade-repo-host successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Normal PersistentVolumeClaim major-upgrade-repo1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-mutual-maggot/major-upgrade-repo1" pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:30 +0000 UTC Normal PersistentVolumeClaim major-upgrade-repo1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-6c886fdbdc-hlrv4 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-6c886fdbdc-pp7b9 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 185ms (186ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-6c886fdbdc-z5dwc to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulCreate Created pod: major-upgrade-pgbouncer-6c886fdbdc-hlrv4 replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulCreate Created pod: major-upgrade-pgbouncer-6c886fdbdc-z5dwc replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:31 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulCreate Created pod: major-upgrade-pgbouncer-6c886fdbdc-pp7b9 replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-l2dk-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-l2dk-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-eb11c076-d76a-4f39-9098-b8786b0c0598 pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer-config} Pulling Pulling image 
"perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 255ms (255ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:32 +0000 UTC Warning Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc FailedMount MountVolume.SetUp failed for volume "pgbouncer-config" : failed to sync configmap cache: timed out waiting for the condition kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:33 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-jjsw-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:33 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-jjsw-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-d1df1d74-84ce-4c6d-95f8-606de480eb2b pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:33 +0000 UTC Normal PersistentVolumeClaim major-upgrade-instance1-qt55-pgdata ProvisioningSucceeded Successfully provisioned volume pvc-63d33f78-e61c-43c8-b2db-7aecc3512a23 pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:33 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-qt55-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 227ms (227ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod 
major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 245ms (245ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal Pod major-upgrade-repo-host-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-repo-host-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:34 +0000 UTC Normal PersistentVolumeClaim major-upgrade-repo1 ProvisioningSucceeded Successfully provisioned volume pvc-bf471493-033f-4fdc-ad26-4c627f6976d8 pd.csi.storage.gke.io_gke-8c54460bb41e4488b69f-0067-3b60-vm_6788aed5-7473-47eb-8e87-07ae1be2e8f7 logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:40 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-eb11c076-d76a-4f39-9098-b8786b0c0598" attachdetach-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:41 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d1df1d74-84ce-4c6d-95f8-606de480eb2b" attachdetach-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:41 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:41 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 264ms (264ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:41 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:41 +0000 UTC Normal Pod major-upgrade-repo-host-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bf471493-033f-4fdc-ad26-4c627f6976d8" attachdetach-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-13} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 256ms (256ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod 
major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-13} Created Created container: extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-13} Started Started container extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:42 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-63d33f78-e61c-43c8-b2db-7aecc3512a23" attachdetach-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:43 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 257ms (257ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-13} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-13} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 225ms (225ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-13} Created Created container: extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-13} Started Started container extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 227ms (227ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod 
major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-13} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:44 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 284ms (284ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-13} Created Created container: extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-13} Started Started container extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-13} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 255ms (256ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-13} Created Created container: extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-13} Started Started container extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:45 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-13} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 235ms (235ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-13} Created Created container: extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-13} Started Started container extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 
16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 247ms (247ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-13} Created Created container: extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-13} Started Started container extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:46 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 231ms (231ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Created Created container: database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Started Started container database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 230ms (230ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Created Created container: database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Started Started container database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:47 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 257ms (257ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod 
major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 254ms (254ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:48 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 273ms (273ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Created Created container: database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Started Started container database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 256ms (256ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Created Created container: database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Started Started container database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 18.131s (18.131s including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet logger.go:42: 16:38:36 | 
major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer} Started Started container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:49 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 4.103s (4.103s including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Created Created container: database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Started Started container database-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbouncer" in 197ms (197ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:50 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:56 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:56 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 260ms (260ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:56 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:56 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:57 +0000 UTC Warning PostgresCluster.postgres-operator.crunchydata.com major-upgrade UnableToCreateStanzas command terminated with exit code 50: repo1-path = /pgbackrest/repo1 2025-04-11 16:17:57.531 P00 ERROR: [050]: unable to acquire lock on file '/tmp/pgbackrest/db-archive-1.lock': Resource temporarily unavailable HINT: is another pgBackRest process running? 2025-04-11 16:17:57.541 P00 ERROR: [050]: unable to acquire lock on file '/tmp/pgbackrest/db-archive-1.lock': Resource temporarily unavailable HINT: is another pgBackRest process running? 
postgrescluster-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-postgres" in 267ms (267ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Created Created container: database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Started Started container database kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-postgres" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" in 15.286s (15.286s including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Created Created container: pgbackrest-log-dir kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:17:59 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Started Started container pgbackrest-log-dir kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:00 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:00 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" in 249ms (249ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:00 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:00 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:01 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:01 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" in 252ms (252ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:01 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal Pod 
major-upgrade-repo-host-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" in 254ms (254ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Created Created container: pgbackrest-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Started Started container pgbackrest-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:02 +0000 UTC Normal PostgresCluster.postgres-operator.crunchydata.com major-upgrade StanzasCreated pgBackRest stanza creation completed successfully postgrescluster-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:05 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-qtz4-wrxwr to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:05 +0000 UTC Normal Job.batch major-upgrade-backup-qtz4 SuccessfulCreate Created pod: major-upgrade-backup-qtz4-wrxwr job-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:06 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:06 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 285ms (285ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:06 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:06 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:07 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:08 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg13-pgbackrest" in 250ms (250ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:08 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:08 +0000 UTC Normal Pod major-upgrade-backup-qtz4-wrxwr.spec.containers{pgbackrest} Started Started container pgbackrest kubelet logger.go:42: 16:38:36 | 
major-upgrade | 2025-04-11 16:18:34 +0000 UTC Normal Job.batch major-upgrade-backup-qtz4 Completed Job completed job-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:52 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-l2dk SuccessfulDelete delete Pod major-upgrade-instance1-l2dk-0 in StatefulSet major-upgrade-instance1-l2dk successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:53 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-jjsw SuccessfulDelete delete Pod major-upgrade-instance1-jjsw-0 in StatefulSet major-upgrade-instance1-jjsw successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-hlrv4.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-pp7b9.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer} Killing Stopping container pgbouncer kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Pod major-upgrade-pgbouncer-6c886fdbdc-z5dwc.spec.containers{pgbouncer-config} Killing Stopping container pgbouncer-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulDelete Deleted pod: major-upgrade-pgbouncer-6c886fdbdc-pp7b9 replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulDelete Deleted pod: major-upgrade-pgbouncer-6c886fdbdc-hlrv4 replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-6c886fdbdc SuccessfulDelete Deleted pod: major-upgrade-pgbouncer-6c886fdbdc-z5dwc replicaset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:54 +0000 UTC Normal Deployment.apps major-upgrade-pgbouncer ScalingReplicaSet Scaled down replica set major-upgrade-pgbouncer-6c886fdbdc to 0 from 3 deployment-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:18:56 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-qt55 SuccessfulDelete delete Pod major-upgrade-instance1-qt55-0 in StatefulSet major-upgrade-instance1-qt55 successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:06 +0000 UTC Normal PodDisruptionBudget.policy major-upgrade-set-instance1 NoPods No matching pods found controllermanager logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:08 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Killing Stopping container pgbackrest kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:08 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Killing Stopping container 
pgbackrest-config kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:08 +0000 UTC Normal StatefulSet.apps major-upgrade-repo-host SuccessfulDelete delete Pod major-upgrade-repo-host-0 in StatefulSet major-upgrade-repo-host successful statefulset-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:11 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/13-to-14-pgdata-p7h2d to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:11 +0000 UTC Normal Job.batch 13-to-14-pgdata SuccessfulCreate Created pod: 13-to-14-pgdata-p7h2d job-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:25 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-63d33f78-e61c-43c8-b2db-7aecc3512a23" attachdetach-controller logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:27 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-13} Pulling Pulling image "perconalab/percona-postgresql-operator:main-upgrade" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:45 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-upgrade" in 18.118s (18.118s including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:45 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-13} Created Created container: extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:45 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-13} Started Started container extension-relocator-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:55 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-13} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:56 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-13} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 247ms (247ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:56 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-13} Created Created container: extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:56 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-13} Started Started container extension-installer-13 kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:56 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-upgrade" kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:57 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-upgrade" in 227ms (227ms including waiting) kubelet logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:57 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet logger.go:42: 16:38:36 | major-upgrade | 
2025-04-11 16:19:57 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:57 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:58 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 206ms (206ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:58 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:58 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:58 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.containers{database} Pulled Container image "perconalab/percona-postgresql-operator:main-upgrade" already present on machine kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:58 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:19:59 +0000 UTC Normal Pod 13-to-14-pgdata-p7h2d.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:09 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-jjsw-2pj2k Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/13-to-14-major-upgrade-instance1-jjsw-2pj2k to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:09 +0000 UTC Normal Job.batch 13-to-14-major-upgrade-instance1-jjsw SuccessfulCreate Created pod: 13-to-14-major-upgrade-instance1-jjsw-2pj2k job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:09 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-l2dk-llh9v Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/13-to-14-major-upgrade-instance1-l2dk-llh9v to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:09 +0000 UTC Normal Job.batch 13-to-14-major-upgrade-instance1-l2dk SuccessfulCreate Created pod: 13-to-14-major-upgrade-instance1-l2dk-llh9v job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:09 +0000 UTC Normal Job.batch 13-to-14-pgdata Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:16 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-l2dk-llh9v SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-eb11c076-d76a-4f39-9098-b8786b0c0598" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:20 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-l2dk-llh9v.spec.containers{database} Pulled Container image "perconalab/percona-postgresql-operator:main-upgrade" already present on machine kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:20 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-l2dk-llh9v.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:20 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-l2dk-llh9v.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:21 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-jjsw-2pj2k SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d1df1d74-84ce-4c6d-95f8-606de480eb2b" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:22 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-jjsw-2pj2k.spec.containers{database} Pulled Container image "perconalab/percona-postgresql-operator:main-upgrade" already present on machine kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:22 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-jjsw-2pj2k.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:22 +0000 UTC Normal Pod 13-to-14-major-upgrade-instance1-jjsw-2pj2k.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:23 +0000 UTC Normal Job.batch 13-to-14-major-upgrade-instance1-l2dk Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:25 +0000 UTC Normal Job.batch 13-to-14-major-upgrade-instance1-jjsw Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:28 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-qt55-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-79b44c76f-2272q to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-79b44c76f-gj9w4 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbouncer-79b44c76f-hzgzk to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-79b44c76f SuccessfulCreate Created pod: major-upgrade-pgbouncer-79b44c76f-2272q replicaset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-79b44c76f SuccessfulCreate Created pod: major-upgrade-pgbouncer-79b44c76f-hzgzk replicaset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal ReplicaSet.apps major-upgrade-pgbouncer-79b44c76f SuccessfulCreate Created pod: major-upgrade-pgbouncer-79b44c76f-gj9w4 replicaset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal Deployment.apps major-upgrade-pgbouncer ScalingReplicaSet Scaled up replica set major-upgrade-pgbouncer-79b44c76f to 3 deployment-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:29 +0000 UTC Normal Pod major-upgrade-repo-host-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-repo-host-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 247ms (247ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 259ms (259ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:30 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 238ms (238ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 253ms (253ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-gj9w4.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:31 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-hzgzk.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:36 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-63d33f78-e61c-43c8-b2db-7aecc3512a23" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:36 +0000 UTC Normal Pod major-upgrade-repo-host-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bf471493-033f-4fdc-ad26-4c627f6976d8" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:37 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:37 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:38 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 259ms (259ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:38 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Created Created container: pgbackrest-log-dir kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:38 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{pgbackrest-log-dir} Started Started container pgbackrest-log-dir kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:39 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:39 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 202ms (202ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:39 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:39 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:40 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:40 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 252ms (252ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:40 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:40 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:40 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:41 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 265ms (265ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:41 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Created Created container: pgbackrest-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:41 +0000 UTC Normal Pod major-upgrade-repo-host-0.spec.containers{pgbackrest-config} Started Started container pgbackrest-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 14.576s (14.576s including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer} Created Created container: pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer} Started Started container pgbouncer kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer-config} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer-config} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbouncer" in 238ms (238ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:44 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer-config} Created Created container: pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:45 +0000 UTC Normal Pod major-upgrade-pgbouncer-79b44c76f-2272q.spec.containers{pgbouncer-config} Started Started container pgbouncer-config kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:58 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 21.005s (21.005s including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:58 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:20:59 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:03 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:03 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 206ms (206ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:03 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:03 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:04 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:04 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 243ms (243ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:04 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:04 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:05 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:05 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 199ms (199ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:05 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:05 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:06 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:06 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 203ms (203ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:06 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:06 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:07 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:07 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 200ms (200ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:07 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:07 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:07 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:11 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-l2dk-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:12 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-jjsw-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:14 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-5rsx-695m9 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:14 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:14 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 253ms (254ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:14 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:14 +0000 UTC Normal Job.batch major-upgrade-backup-5rsx SuccessfulCreate Created pod: major-upgrade-backup-5rsx-695m9 job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:15 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:19 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:19 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-eb11c076-d76a-4f39-9098-b8786b0c0598" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 951ms (951ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-backup-5rsx-695m9.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 266ms (266ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:20 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:21 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d1df1d74-84ce-4c6d-95f8-606de480eb2b" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:21 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:21 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 242ms (242ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:21 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:21 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 287ms (287ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 252ms (252ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-patroni-version-check Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-patroni-version-check to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Pulled Container image "perconalab/percona-postgresql-operator:main-ppg14-postgres" already present on machine kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Created Created container: patroni-version-check kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:22 +0000 UTC Normal Pod major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Started Started container patroni-version-check kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 270ms (270ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 253ms (253ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:23 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 248ms (248ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 244ms (244ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:24 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 241ms (241ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 199ms (199ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:25 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:26 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:26 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 256ms (256ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:26 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:26 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:27 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:27 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 262ms (262ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:27 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:27 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:28 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:28 +0000 UTC Normal Pod major-upgrade-patroni-version-check.spec.containers{patroni-version-check} Killing Stopping container patroni-version-check kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:21:50 +0000 UTC Normal Job.batch major-upgrade-backup-5rsx Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-w5f8-tn58h to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 117ms (117ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:08 +0000 UTC Normal Job.batch major-upgrade-backup-w5f8 SuccessfulCreate Created pod: major-upgrade-backup-w5f8-tn58h job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:10 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:10 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 118ms (118ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:10 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:10 +0000 UTC Normal Pod major-upgrade-backup-w5f8-tn58h.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:22 +0000 UTC Normal Job.batch major-upgrade-backup-w5f8 Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:34 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:34 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:34 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Killing Stopping container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:39 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-pgbackrest-restore-jgspf to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:39 +0000 UTC Normal Job.batch major-upgrade-pgbackrest-restore SuccessfulCreate Created pod: major-upgrade-pgbackrest-restore-jgspf job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:44 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:44 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 116ms (116ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:44 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:44 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:45 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.containers{pgbackrest-restore} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:45 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.containers{pgbackrest-restore} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 108ms (108ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:45 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.containers{pgbackrest-restore} Created Created container: pgbackrest-restore kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:45 +0000 UTC Normal Pod major-upgrade-pgbackrest-restore-jgspf.spec.containers{pgbackrest-restore} Started Started container pgbackrest-restore kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:54 +0000 UTC Normal Job.batch major-upgrade-pgbackrest-restore Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:55 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-qt55-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:26:55 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-qt55 SuccessfulCreate create Pod major-upgrade-instance1-qt55-0 in StatefulSet major-upgrade-instance1-qt55 successful statefulset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:09 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-63d33f78-e61c-43c8-b2db-7aecc3512a23" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:10 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:10 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 116ms (116ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:10 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:10 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:11 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:11 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 127ms (127ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:11 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:11 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:12 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:12 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 101ms (101ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:12 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:12 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:13 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:13 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 105ms (105ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:13 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:13 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:14 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:14 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 127ms (127ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:14 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:14 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:15 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:15 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 109ms (109ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:15 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:15 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:15 +0000 UTC Normal Pod major-upgrade-instance1-qt55-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:19 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-4qds-t6tbn to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:19 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:19 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 248ms (248ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:19 +0000 UTC Normal Job.batch major-upgrade-backup-4qds SuccessfulCreate Created pod: major-upgrade-backup-4qds-t6tbn job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 236ms (236ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:20 +0000 UTC Normal Pod major-upgrade-backup-4qds-t6tbn.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:21 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-jjsw SuccessfulCreate create Pod major-upgrade-instance1-jjsw-0 in StatefulSet major-upgrade-instance1-jjsw successful statefulset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:21 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-l2dk-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-z1zf default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:21 +0000 UTC Normal StatefulSet.apps major-upgrade-instance1-l2dk SuccessfulCreate create Pod major-upgrade-instance1-l2dk-0 in StatefulSet major-upgrade-instance1-l2dk successful statefulset-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:22 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-instance1-jjsw-0 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-35vz default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:28 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-eb11c076-d76a-4f39-9098-b8786b0c0598" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:29 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d1df1d74-84ce-4c6d-95f8-606de480eb2b" attachdetach-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:30 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:30 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:30 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 213ms (213ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:30 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:30 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-4qds-vlss7 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 119ms (119ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Job.batch major-upgrade-backup-4qds SuccessfulCreate Created pod: major-upgrade-backup-4qds-vlss7 job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 271ms (271ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Created Created container: postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{postgres-startup} Started Started container postgres-startup kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 294ms (294ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 260ms (260ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Created Created container: extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:31 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:32 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:32 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 116ms (116ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:32 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:32 +0000 UTC Normal Pod major-upgrade-backup-4qds-vlss7.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:32 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-relocator-14} Started Started container extension-relocator-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 207ms (207ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Pulling Pulling image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:PR-988-1f8703f6e" in 187ms (187ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Created Created container: extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:33 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{extension-installer-14} Started Started container extension-installer-14 kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:34 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:34 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:34 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 239ms (239ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 248ms (248ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 191ms (191ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Created Created container: database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{database-init} Started Started container database-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 212ms (212ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Created Created container: nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:35 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.initContainers{nss-wrapper-init} Started Started container nss-wrapper-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 232ms (232ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-postgres" in 229ms (229ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Created Created container: database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:36 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:37 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{database} Started Started container database kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:37 +0000 UTC Normal Pod major-upgrade-instance1-jjsw-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:37 +0000 UTC Normal Pod major-upgrade-instance1-l2dk-0.spec.containers{replication-cert-copy} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-postgres" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-4qds-w6pxg to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 123ms (123ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:52 +0000 UTC Normal Job.batch major-upgrade-backup-4qds SuccessfulCreate Created pod: major-upgrade-backup-4qds-w6pxg job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:53 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:53 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 110ms (110ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:53 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:27:53 +0000 UTC Normal Pod major-upgrade-backup-4qds-w6pxg.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2 Binding Scheduled Successfully assigned kuttl-test-mutual-maggot/major-upgrade-backup-4qds-46cl2 to gke-jen-pg-988-1f8703f6e-default-pool-45eabdfb-3n18 default-scheduler
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.initContainers{pgbackrest-init} Pulling Pulling image "perconalab/percona-postgresql-operator:K8SPG-708-12" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.initContainers{pgbackrest-init} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:K8SPG-708-12" in 116ms (116ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.initContainers{pgbackrest-init} Created Created container: pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.initContainers{pgbackrest-init} Started Started container pgbackrest-init kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:34 +0000 UTC Normal Job.batch major-upgrade-backup-4qds SuccessfulCreate Created pod: major-upgrade-backup-4qds-46cl2 job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:35 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.containers{pgbackrest} Pulling Pulling image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:35 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.containers{pgbackrest} Pulled Successfully pulled image "perconalab/percona-postgresql-operator:main-ppg14-pgbackrest" in 131ms (131ms including waiting) kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:35 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.containers{pgbackrest} Created Created container: pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:35 +0000 UTC Normal Pod major-upgrade-backup-4qds-46cl2.spec.containers{pgbackrest} Started Started container pgbackrest kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:28:47 +0000 UTC Normal Job.batch major-upgrade-backup-4qds Completed Job completed job-controller
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:32:30 +0000 UTC Warning Pod major-upgrade-instance1-l2dk-0.spec.containers{database} Unhealthy Readiness probe failed: kubelet
logger.go:42: 16:38:36 | major-upgrade | 2025-04-11 16:32:40 +0000 UTC Warning Pod
major-upgrade-instance1-jjsw-0.spec.containers{database} Unhealthy Readiness probe failed: kubelet logger.go:42: 16:38:36 | major-upgrade | Deleting namespace: kuttl-test-mutual-maggot === NAME kuttl harness.go:407: run tests finished harness.go:515: cleaning up harness.go:572: removing temp folder: "" --- FAIL: kuttl (1362.30s) --- FAIL: kuttl/harness (0.00s) --- FAIL: kuttl/harness/major-upgrade (1361.86s) FAIL