Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/logs/pitr-sharded.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-sharded-15191 + local ns=pitr-sharded-15191 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.NFIeqMRGV9 ++ mktemp + local LAST_ERR=/tmp/tmp.nYPEyYURRy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NFIeqMRGV9 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.nYPEyYURRy + rm /tmp/tmp.NFIeqMRGV9 /tmp/tmp.nYPEyYURRy + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XTDHadDd0Q ++ mktemp + local LAST_ERR=/tmp/tmp.6GqGgV4md3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XTDHadDd0Q + cat /tmp/tmp.6GqGgV4md3 + rm /tmp/tmp.XTDHadDd0Q /tmp/tmp.6GqGgV4md3 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.tDVbsvqNCg ++ mktemp + local LAST_ERR=/tmp/tmp.RtJ0AqcsVd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tDVbsvqNCg + cat /tmp/tmp.RtJ0AqcsVd + rm /tmp/tmp.tDVbsvqNCg /tmp/tmp.RtJ0AqcsVd + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jjM3s2f0d7 ++ mktemp + local LAST_ERR=/tmp/tmp.YftSsn0OgK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jjM3s2f0d7 + cat /tmp/tmp.YftSsn0OgK + rm /tmp/tmp.jjM3s2f0d7 /tmp/tmp.YftSsn0OgK + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qLdwvX2Hsr ++ mktemp + local LAST_ERR=/tmp/tmp.pdI8fc4I2N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qLdwvX2Hsr clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.pdI8fc4I2N + rm /tmp/tmp.qLdwvX2Hsr /tmp/tmp.pdI8fc4I2N + return 0 + check_crd_for_deletion PR-1912-8f992ab1 + local git_tag=PR-1912-8f992ab1 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1912-8f992ab1/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E0WnB1MPew +++ mktemp ++ local LAST_ERR=/tmp/tmp.FR8gP5ijzF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.E0WnB1MPew ++ cat /tmp/tmp.FR8gP5ijzF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.E0WnB1MPew ++ cat /tmp/tmp.FR8gP5ijzF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.E0WnB1MPew ++ cat /tmp/tmp.FR8gP5ijzF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.E0WnB1MPew ++ cat /tmp/tmp.FR8gP5ijzF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.E0WnB1MPew /tmp/tmp.FR8gP5ijzF ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns + awk '{print$1}' + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.2cdpyc1zXu ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Ls17pLUJAO + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.khJxwxK8mw ++ seq 0 2 + for i in 
'$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.lkr54qzmVQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.khJxwxK8mw + cat /tmp/tmp.lkr54qzmVQ + rm /tmp/tmp.khJxwxK8mw /tmp/tmp.lkr54qzmVQ + return 0 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "pitr-sharded-25466" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2cdpyc1zXu namespace "psmdb-operator" deleted + cat /tmp/tmp.Ls17pLUJAO + rm /tmp/tmp.2cdpyc1zXu /tmp/tmp.Ls17pLUJAO + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aF6TWLgYCC ++ mktemp + local LAST_ERR=/tmp/tmp.xc0Kl25OJ1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aF6TWLgYCC + cat /tmp/tmp.xc0Kl25OJ1 + rm /tmp/tmp.aF6TWLgYCC /tmp/tmp.xc0Kl25OJ1 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fXAqFt3hUk ++ mktemp + local LAST_ERR=/tmp/tmp.bVLevZKdvU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fXAqFt3hUk namespace/psmdb-operator created + cat /tmp/tmp.bVLevZKdvU + rm /tmp/tmp.fXAqFt3hUk /tmp/tmp.bVLevZKdvU + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XbR6BcddqC +++ mktemp ++ local LAST_ERR=/tmp/tmp.xMaM02DBNg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XbR6BcddqC ++ cat /tmp/tmp.xMaM02DBNg ++ rm /tmp/tmp.XbR6BcddqC /tmp/tmp.xMaM02DBNg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.JX8r74wOJF ++ mktemp + local LAST_ERR=/tmp/tmp.M06qQAVpoq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JX8r74wOJF Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4" modified. 
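
Note: the kubectl_bin wrapper traced throughout this log retries each kubectl invocation up to three times, sleeping 0, 4 and 8 seconds between failed attempts and capturing stdout/stderr in mktemp files (LAST_OUT/LAST_ERR) before printing them. A minimal sketch of that retry pattern, with hypothetical names rather than the exact helper from e2e-tests, would be roughly:

    # sketch only: retry a kubectl call up to three times, sleeping 0s/4s/8s
    # between attempts and capturing stdout/stderr in temp files, as the
    # "+ for i in $(seq 0 2)" traces above show
    kubectl_retry() {
        local out err attempt
        out=$(mktemp); err=$(mktemp)
        for attempt in 0 1 2; do
            if kubectl "$@" >"$out" 2>"$err"; then
                cat "$out" "$err"; rm -f "$out" "$err"
                return 0
            fi
            sleep $((attempt * 4))    # 0, 4, 8 seconds, matching the sleeps in this log
        done
        cat "$out" "$err"; rm -f "$out" "$err"
        return 1
    }
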
+ cat /tmp/tmp.M06qQAVpoq + rm /tmp/tmp.JX8r74wOJF /tmp/tmp.M06qQAVpoq + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.djlnGkrnsC ++ mktemp + local LAST_ERR=/tmp/tmp.afWJ4ThGIy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.djlnGkrnsC customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.afWJ4ThGIy + rm /tmp/tmp.djlnGkrnsC /tmp/tmp.afWJ4ThGIy + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.g8JG6QO7be ++ mktemp + local LAST_ERR=/tmp/tmp.jhPOHdn9VD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g8JG6QO7be clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.jhPOHdn9VD + rm /tmp/tmp.g8JG6QO7be /tmp/tmp.jhPOHdn9VD + return 0 + kubectl_bin apply -f - ++ mktemp + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1912-8f992ab1") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-operator.yaml + local LAST_OUT=/tmp/tmp.Xh177bb3ql ++ mktemp + local LAST_ERR=/tmp/tmp.mxu6RmlJnW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xh177bb3ql deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.mxu6RmlJnW + rm /tmp/tmp.Xh177bb3ql /tmp/tmp.mxu6RmlJnW + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1pPCzjLTLe +++ mktemp ++ local LAST_ERR=/tmp/tmp.AByoK1KBTh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1pPCzjLTLe ++ cat /tmp/tmp.AByoK1KBTh ++ rm /tmp/tmp.1pPCzjLTLe /tmp/tmp.AByoK1KBTh ++ return 0 + wait_pod percona-server-mongodb-operator-f6f898b87-dctqb + local pod=percona-server-mongodb-operator-f6f898b87-dctqb + set +o xtrace waiting for pod/percona-server-mongodb-operator-f6f898b87-dctqb to be ready.OK + create_namespace pitr-sharded-15191 + local namespace=pitr-sharded-15191 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-sharded-15191' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces pitr-sharded-15191 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-sharded-15191 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.W6j5reRvnN ++ mktemp + local LAST_ERR=/tmp/tmp.IfMVnamxqF + local exit_status=0 + local timeout=4 ++ seq 0 2 + xargs kubectl delete ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pitr-sharded-15191 --ignore-not-found + awk '{print$1}' + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.K4uDNAjiDI ++ mktemp + local LAST_ERR=/tmp/tmp.vxJtOMfCMD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K4uDNAjiDI + cat /tmp/tmp.vxJtOMfCMD + rm /tmp/tmp.K4uDNAjiDI /tmp/tmp.vxJtOMfCMD + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W6j5reRvnN + cat /tmp/tmp.IfMVnamxqF + rm /tmp/tmp.W6j5reRvnN /tmp/tmp.IfMVnamxqF + return 0 + kubectl_bin wait --for=delete namespace pitr-sharded-15191 ++ mktemp + local LAST_OUT=/tmp/tmp.B4nDLxScai ++ mktemp + local LAST_ERR=/tmp/tmp.Nj9IXYNK0i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace pitr-sharded-15191 namespace "gke-managed-cim" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B4nDLxScai + cat /tmp/tmp.Nj9IXYNK0i + rm /tmp/tmp.B4nDLxScai /tmp/tmp.Nj9IXYNK0i + return 0 + desc 'create namespace pitr-sharded-15191' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-sharded-15191 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-sharded-15191 ++ mktemp namespace "gke-managed-system" deleted + local LAST_OUT=/tmp/tmp.KIHvudI1X7 ++ mktemp + local LAST_ERR=/tmp/tmp.euDNCwHDrG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pitr-sharded-15191 namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KIHvudI1X7 namespace/pitr-sharded-15191 created + cat /tmp/tmp.euDNCwHDrG + rm /tmp/tmp.KIHvudI1X7 /tmp/tmp.euDNCwHDrG + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.UsackrNeWP +++ mktemp ++ local LAST_ERR=/tmp/tmp.aRTRwTusJI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UsackrNeWP ++ cat /tmp/tmp.aRTRwTusJI ++ rm /tmp/tmp.UsackrNeWP /tmp/tmp.aRTRwTusJI ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4 --namespace=pitr-sharded-15191 ++ mktemp + local LAST_OUT=/tmp/tmp.aP0qYbGcFK ++ mktemp + local LAST_ERR=/tmp/tmp.ZvKnTC7D0j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4 --namespace=pitr-sharded-15191 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aP0qYbGcFK Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-8f992ab1-2-cluster4" modified. 
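
Note: the create_namespace helper traced above follows a delete / wait-for-delete / recreate pattern and then points the current kubeconfig context at the new namespace. A simplified sketch of that flow, using a hypothetical helper name:

    # sketch only: drop any leftover namespace, wait for it to disappear,
    # recreate it, and switch the current context into it
    recreate_namespace() {
        local ns="$1"
        kubectl delete namespace "$ns" --ignore-not-found
        kubectl wait --for=delete namespace "$ns" 2>/dev/null || true
        kubectl create namespace "$ns"
        kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
    }
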
+ cat /tmp/tmp.ZvKnTC7D0j + rm /tmp/tmp.aP0qYbGcFK /tmp/tmp.ZvKnTC7D0j + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu May 8 11:11:15 2025 NAMESPACE: pitr-sharded-15191 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-sharded-15191.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-sharded-15191 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-sharded-15191 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-sharded-15191 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-sharded-15191 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gVLo5tgU9W +++ mktemp ++ local LAST_ERR=/tmp/tmp.GkMrCaba0W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gVLo5tgU9W ++ cat /tmp/tmp.GkMrCaba0W ++ rm /tmp/tmp.gVLo5tgU9W /tmp/tmp.GkMrCaba0W ++ return 0 + MINIO_POD=minio-service-8967c7f7f-4zr98 + wait_pod minio-service-8967c7f7f-4zr98 + local pod=minio-service-8967c7f7f-4zr98 + set +o xtrace waiting for pod/minio-service-8967c7f7f-4zr98 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-sharded-15191.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.I24f4Efmcz ++ mktemp + local LAST_ERR=/tmp/tmp.JsV5zzyhuZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-sharded-15191.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I24f4Efmcz service/minio-service created + cat /tmp/tmp.JsV5zzyhuZ + rm /tmp/tmp.I24f4Efmcz /tmp/tmp.JsV5zzyhuZ + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.S3sbf6yYBM ++ mktemp + local LAST_ERR=/tmp/tmp.XqpkmRhoIM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S3sbf6yYBM make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.XqpkmRhoIM + rm /tmp/tmp.S3sbf6yYBM /tmp/tmp.XqpkmRhoIM + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dCoARVFeEp ++ mktemp + local LAST_ERR=/tmp/tmp.YfZJgxvyU0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dCoARVFeEp secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.YfZJgxvyU0 + rm /tmp/tmp.dCoARVFeEp /tmp/tmp.YfZJgxvyU0 + return 0 + apply_s3_storage_secrets + desc 'create 
secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.GzBL79abqG ++ mktemp + local LAST_ERR=/tmp/tmp.x4WKbDoVPJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GzBL79abqG secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.x4WKbDoVPJ + rm /tmp/tmp.GzBL79abqG /tmp/tmp.x4WKbDoVPJ + return 0 + desc 'create custom RuntimeClass' + set +o xtrace ----------------------------------------------------------------------------------- create custom RuntimeClass ----------------------------------------------------------------------------------- + version_gt 1.19 ++ echo '1.30 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/container-rc.yaml + /usr/bin/sed s/docker/runc/g + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2iURBsOae5 ++ mktemp + local LAST_ERR=/tmp/tmp.w2gceTSYBb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2iURBsOae5 runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.w2gceTSYBb + rm /tmp/tmp.2iURBsOae5 /tmp/tmp.w2gceTSYBb + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + '[' 0 -eq 0 ']' + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-8f992ab1"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + local LAST_OUT=/tmp/tmp.Usd7b1Px3N ++ mktemp + local LAST_ERR=/tmp/tmp.qcXMQFaMmA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Usd7b1Px3N perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.qcXMQFaMmA + rm 
/tmp/tmp.Usd7b1Px3N /tmp/tmp.qcXMQFaMmA + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KtSp5HoE4f +++ mktemp ++ local LAST_ERR=/tmp/tmp.AOXkfEr30R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KtSp5HoE4f ++ cat /tmp/tmp.AOXkfEr30R ++ rm /tmp/tmp.KtSp5HoE4f /tmp/tmp.AOXkfEr30R ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ggIHhYJLCv +++ mktemp ++ local LAST_ERR=/tmp/tmp.UnfkB9mYjO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ggIHhYJLCv ++ cat /tmp/tmp.UnfkB9mYjO ++ rm /tmp/tmp.ggIHhYJLCv /tmp/tmp.UnfkB9mYjO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................... 
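
Note: the readiness checks in this run combine per-pod waits ("waiting for pod/... to be ready") with polling the PerconaServerMongoDB resource until .status.state reports ready; the wait_cluster_consistency trace below uses wait_time=32 and a 7-second sleep. A rough sketch of that polling loop, with a hypothetical name:

    # sketch only: poll the psmdb resource until the operator reports the
    # cluster ready, giving up after 32 attempts spaced 7 seconds apart
    wait_cluster_ready() {
        local cluster="$1" retries=0
        until [ "$(kubectl get psmdb "$cluster" -o jsonpath='{.status.state}')" = "ready" ]; do
            retries=$((retries + 1))
            if [ "$retries" -gt 32 ]; then
                echo "cluster ${cluster} did not become ready" >&2
                return 1
            fi
            sleep 7
        done
    }
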
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yv1VmlK55T +++ mktemp ++ local LAST_ERR=/tmp/tmp.T6Afokozwi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yv1VmlK55T ++ cat /tmp/tmp.T6Afokozwi ++ rm /tmp/tmp.yv1VmlK55T /tmp/tmp.T6Afokozwi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xwoCXULMFD +++ mktemp ++ local LAST_ERR=/tmp/tmp.vIt5RIhkiW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xwoCXULMFD ++ cat /tmp/tmp.vIt5RIhkiW ++ rm /tmp/tmp.xwoCXULMFD /tmp/tmp.vIt5RIhkiW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xQxWJycELG +++ mktemp ++ local LAST_ERR=/tmp/tmp.tkR8O6NaIs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xQxWJycELG ++ cat /tmp/tmp.tkR8O6NaIs ++ rm /tmp/tmp.xQxWJycELG /tmp/tmp.tkR8O6NaIs ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-sharded-15191", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.bviKslaps3 ++ mktemp + local LAST_ERR=/tmp/tmp.AYHaaVl0ZU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bviKslaps3 + cat /tmp/tmp.AYHaaVl0ZU + rm /tmp/tmp.bviKslaps3 /tmp/tmp.AYHaaVl0ZU + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-rs1 + local resource=statefulset/some-name-rs1 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml + local new_result=/tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs1.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-sharded-15191", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs1 ++ mktemp + local LAST_OUT=/tmp/tmp.JcvYm7t6O1 ++ mktemp + local LAST_ERR=/tmp/tmp.qaFGwSzxgQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs1 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JcvYm7t6O1 + cat /tmp/tmp.qaFGwSzxgQ + rm /tmp/tmp.JcvYm7t6O1 /tmp/tmp.qaFGwSzxgQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs1.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs1.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs1.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs1.yml + compare_kubectl statefulset/some-name-rs2 + local resource=statefulset/some-name-rs2 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml + local new_result=/tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs2.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-sharded-15191", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs2 ++ mktemp + local LAST_OUT=/tmp/tmp.5vZOxiTDxe ++ mktemp + local LAST_ERR=/tmp/tmp.L1brhshns3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs2 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5vZOxiTDxe + cat /tmp/tmp.L1brhshns3 + rm /tmp/tmp.5vZOxiTDxe /tmp/tmp.L1brhshns3 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs2.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs2.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs2.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml /tmp/tmp.gH7mk8jHRC/statefulset_some-name-rs2.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.gH7mk8jHRC/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-sharded-15191", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.sYSsKBZNxF ++ mktemp + local LAST_ERR=/tmp/tmp.ND6zokDnvV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sYSsKBZNxF + cat /tmp/tmp.ND6zokDnvV + rm /tmp/tmp.sYSsKBZNxF /tmp/tmp.ND6zokDnvV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.gH7mk8jHRC/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.gH7mk8jHRC/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-sharded-15191", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vRRZvMbZwc ++ mktemp + local LAST_ERR=/tmp/tmp.XBBSB9XhT8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vRRZvMbZwc + cat /tmp/tmp.XBBSB9XhT8 + rm /tmp/tmp.vRRZvMbZwc /tmp/tmp.XBBSB9XhT8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gH7mk8jHRC/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.gH7mk8jHRC/statefulset_some-name-mongos.yml + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-15191 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G1eASzXTP2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.glNwEMkZbz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G1eASzXTP2 ++ cat /tmp/tmp.glNwEMkZbz ++ rm /tmp/tmp.G1eASzXTP2 /tmp/tmp.glNwEMkZbz ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5uw6O9xZea ++ mktemp + local LAST_ERR=/tmp/tmp.7OzrrOxVl7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5uw6O9xZea Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("14c51ed5-68a5-4d32-b466-577bd1aa782e") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.7OzrrOxVl7 + rm /tmp/tmp.5uw6O9xZea /tmp/tmp.7OzrrOxVl7 + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-15191 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Xhz9BcGWG +++ mktemp ++ local LAST_ERR=/tmp/tmp.nFgkro4Fjw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Xhz9BcGWG ++ cat /tmp/tmp.nFgkro4Fjw ++ rm /tmp/tmp.0Xhz9BcGWG /tmp/tmp.nFgkro4Fjw ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp 
+ local LAST_OUT=/tmp/tmp.fkH9vDnHPI ++ mktemp + local LAST_ERR=/tmp/tmp.RNYTTH7rx6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fkH9vDnHPI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("da96a219-1617-4b4b-bd8d-7804e4998dc6") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RNYTTH7rx6 + rm /tmp/tmp.fkH9vDnHPI /tmp/tmp.RNYTTH7rx6 + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-15191 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-15191 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rtZdVWqMK2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UteK3iGNxe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rtZdVWqMK2 ++ cat /tmp/tmp.UteK3iGNxe ++ rm /tmp/tmp.rtZdVWqMK2 /tmp/tmp.UteK3iGNxe ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Wn5PDzz2Bt ++ mktemp + local LAST_ERR=/tmp/tmp.JoXR589p8P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wn5PDzz2Bt + cat /tmp/tmp.JoXR589p8P + rm /tmp/tmp.Wn5PDzz2Bt /tmp/tmp.JoXR589p8P + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/find.json /tmp/tmp.gH7mk8jHRC/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting 
for pbm-agent to be ready in some-name-rs0-0...2025-05-08T11:15:10.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-05-08T11:15:06.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-05-08T11:15:06.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2025-05-08T11:15:09.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2025-05-08T11:15:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2025-05-08T11:15:07.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2025-05-08T11:15:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2025-05-08T11:15:12.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2025-05-08T11:15:08.000+0000 I listening for the commands + local backup_name + '[' 0 -eq 0 ']' + backup_name=backup-minio + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/backup-minio.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: backup-minio-0/' ++ mktemp + local LAST_OUT=/tmp/tmp.kwpzpAkAXu ++ mktemp + local LAST_ERR=/tmp/tmp.Zxx05nFtWI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kwpzpAkAXu perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.Zxx05nFtWI + rm /tmp/tmp.kwpzpAkAXu /tmp/tmp.Zxx05nFtWI + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + local target_state=ready + set +o xtrace waiting for backup-minio-0 to reach ready state............. 
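For reference, the run_backup step traced above reduces to a short render-and-apply sequence. The sketch below is a simplification: the sed pipeline and paths are taken from the trace, while the readiness loop is an assumption standing in for the test's wait_backup helper (whose body is not shown in this log) and the .status.state field it presumably polls.

# Assumed sketch of "run_backup backup-minio 0" as traced above
name=backup-minio-0
src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912

# Render the backup CR with the indexed name and submit it
/usr/bin/sed -e "s/name:/name: ${name}/" \
    "${src_dir}/e2e-tests/pitr-sharded/conf/backup-minio.yml" \
    | kubectl apply -f -

# Poll the psmdb-backup object until it reports ready (assumed field; the helper
# prints one dot per attempt, which is what produces the trailing dots in the log)
until [[ "$(kubectl get psmdb-backup "${name}" -o jsonpath='{.status.state}')" == "ready" ]]; do
    printf '.'
    sleep 1
done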
+ sleep 5 + compare_latest_restorable_time some-name-rs0 backup-minio-0 + local cluster=some-name-rs0 + local backup_name=backup-minio-0 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IBL8OllWsT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6pZ5BwnkNh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IBL8OllWsT +++ cat /tmp/tmp.6pZ5BwnkNh +++ rm /tmp/tmp.IBL8OllWsT /tmp/tmp.6pZ5BwnkNh +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6vZ5tOEyIG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lNsLK3Rzxt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6vZ5tOEyIG +++ cat /tmp/tmp.lNsLK3Rzxt +++ rm /tmp/tmp.6vZ5tOEyIG /tmp/tmp.lNsLK3Rzxt +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5BPwWUxY9B ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pgKISlMf28 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5BPwWUxY9B +++ cat /tmp/tmp.pgKISlMf28 +++ rm /tmp/tmp.5BPwWUxY9B /tmp/tmp.pgKISlMf28 +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 3 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1fSqsmEE9I ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Gy5x77QnGs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1fSqsmEE9I +++ cat /tmp/tmp.Gy5x77QnGs +++ rm /tmp/tmp.1fSqsmEE9I /tmp/tmp.Gy5x77QnGs +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 4 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H52xCgBnvy ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.5z2YZf1e2e +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.H52xCgBnvy +++ cat /tmp/tmp.5z2YZf1e2e +++ rm /tmp/tmp.H52xCgBnvy /tmp/tmp.5z2YZf1e2e +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 5 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Pxd0GXST27 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.chnbBoVek7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Pxd0GXST27 +++ cat /tmp/tmp.chnbBoVek7 +++ rm /tmp/tmp.Pxd0GXST27 /tmp/tmp.chnbBoVek7 +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 6 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iGGq9kRVLB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8XbNWZJxAQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iGGq9kRVLB +++ cat /tmp/tmp.8XbNWZJxAQ +++ rm /tmp/tmp.iGGq9kRVLB /tmp/tmp.8XbNWZJxAQ +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 7 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uABGNSBvTB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R4E5dYQBNJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uABGNSBvTB +++ cat /tmp/tmp.R4E5dYQBNJ +++ rm /tmp/tmp.uABGNSBvTB /tmp/tmp.R4E5dYQBNJ +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 8 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6PoM2hGwLW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m4D6gJkcqd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6PoM2hGwLW +++ cat /tmp/tmp.m4D6gJkcqd +++ rm /tmp/tmp.6PoM2hGwLW /tmp/tmp.m4D6gJkcqd +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 9 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ jq 
'.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uEkoQdxQvT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BI8OL89bvf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uEkoQdxQvT +++ cat /tmp/tmp.BI8OL89bvf +++ rm /tmp/tmp.uEkoQdxQvT /tmp/tmp.BI8OL89bvf +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 10 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6dHIZ8qIf2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.J8zngGGh2Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6dHIZ8qIf2 +++ cat /tmp/tmp.J8zngGGh2Y +++ rm /tmp/tmp.6dHIZ8qIf2 /tmp/tmp.J8zngGGh2Y +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 11 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YX70tKCesN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hqo7DaMRyd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YX70tKCesN +++ cat /tmp/tmp.hqo7DaMRyd +++ rm /tmp/tmp.YX70tKCesN /tmp/tmp.hqo7DaMRyd +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 12 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JRV3vqXm4z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0OC4ogJprW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JRV3vqXm4z +++ cat /tmp/tmp.0OC4ogJprW +++ rm /tmp/tmp.JRV3vqXm4z /tmp/tmp.0OC4ogJprW +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 13 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LLLbjqbYaP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JdwtnaY6zt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LLLbjqbYaP +++ cat /tmp/tmp.JdwtnaY6zt +++ rm /tmp/tmp.LLLbjqbYaP 
/tmp/tmp.JdwtnaY6zt +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 14 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mY3UTCPrcn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OyOkNCSRuD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mY3UTCPrcn +++ cat /tmp/tmp.OyOkNCSRuD +++ rm /tmp/tmp.mY3UTCPrcn /tmp/tmp.OyOkNCSRuD +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 15 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Dose3iIj2h ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yNVhGAh0Tc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Dose3iIj2h +++ cat /tmp/tmp.yNVhGAh0Tc +++ rm /tmp/tmp.Dose3iIj2h /tmp/tmp.yNVhGAh0Tc +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 16 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IDwnTgXhL9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8ElLagnQNP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IDwnTgXhL9 +++ cat /tmp/tmp.8ElLagnQNP +++ rm /tmp/tmp.IDwnTgXhL9 /tmp/tmp.8ElLagnQNP +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 17 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cftnE49TKs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R8Z2bz3d4B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cftnE49TKs +++ cat /tmp/tmp.R8Z2bz3d4B +++ rm /tmp/tmp.cftnE49TKs /tmp/tmp.R8Z2bz3d4B +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 18 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ckyGHUEc2R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HKzUVAtahK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c 
backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ckyGHUEc2R +++ cat /tmp/tmp.HKzUVAtahK +++ rm /tmp/tmp.ckyGHUEc2R /tmp/tmp.HKzUVAtahK +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 19 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AWCgik90Ym ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ip5hPRo1S4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AWCgik90Ym +++ cat /tmp/tmp.Ip5hPRo1S4 +++ rm /tmp/tmp.AWCgik90Ym /tmp/tmp.Ip5hPRo1S4 +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 20 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gRhGdMivvq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vAJsTLH38u +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gRhGdMivvq +++ cat /tmp/tmp.vAJsTLH38u +++ rm /tmp/tmp.gRhGdMivvq /tmp/tmp.vAJsTLH38u +++ return 0 ++ first_timestamp=1746703140 ++ sleep 5 ++ [[ 1746703140 != '' ]] ++ [[ 1746703140 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.r9B6kOmL6N ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xzzHCYxqWK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.r9B6kOmL6N +++ cat /tmp/tmp.xzzHCYxqWK +++ rm /tmp/tmp.r9B6kOmL6N /tmp/tmp.xzzHCYxqWK +++ return 0 ++ second_timestamp=1746703140 ++ let retry+=1 ++ [[ 21 -gt 30 ]] ++ [[ 1746703140 != '' ]] ++ [[ 1746703140 != \n\u\l\l ]] ++ [[ 1746703140 == 1746703140 ]] ++ /usr/bin/date -u -d @1746703140 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-05-08T11:19:00Z ++ get_latest_restorable_time_from_backup_object backup-minio-0 ++ local backup_name=backup-minio-0 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zaaeQnkncM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LVWv93EOyw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zaaeQnkncM +++ cat /tmp/tmp.LVWv93EOyw +++ rm /tmp/tmp.zaaeQnkncM /tmp/tmp.LVWv93EOyw +++ return 0 ++ latestRestorableTime=2025-05-08T11:19:00Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-05-08T11:19:00Z != '' ]] ++ [[ 2025-05-08T11:19:00Z != \n\u\l\l ]] 
++ echo 2025-05-08T11:19:00Z + backup_time=2025-05-08T11:19:00Z + [[ 2025-05-08T11:19:00Z != \2\0\2\5\-\0\5\-\0\8\T\1\1\:\1\9\:\0\0\Z ]] + write_document -2nd + local cmp_postfix=-2nd + local sleep_value=0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-15191 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mTYEn7pJxC +++ mktemp ++ local LAST_ERR=/tmp/tmp.v4F1wcHRpq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mTYEn7pJxC ++ cat /tmp/tmp.v4F1wcHRpq ++ rm /tmp/tmp.mTYEn7pJxC /tmp/tmp.v4F1wcHRpq ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.PuOMowtbpU ++ mktemp + local LAST_ERR=/tmp/tmp.zIeKlGzM5L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PuOMowtbpU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0b10db9f-d1e9-480d-9636-5a73422ccc96") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zIeKlGzM5L + rm /tmp/tmp.PuOMowtbpU /tmp/tmp.zIeKlGzM5L + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-15191 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-15191 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ 
echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GmBpJqAl2A +++ mktemp ++ local LAST_ERR=/tmp/tmp.a0bctEtlZf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GmBpJqAl2A ++ cat /tmp/tmp.a0bctEtlZf ++ rm /tmp/tmp.GmBpJqAl2A /tmp/tmp.a0bctEtlZf ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.iztAJDvVzF ++ mktemp + local LAST_ERR=/tmp/tmp.7rE6hygG9k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iztAJDvVzF + cat /tmp/tmp.7rE6hygG9k + rm /tmp/tmp.iztAJDvVzF /tmp/tmp.7rE6hygG9k + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.gH7mk8jHRC/find-2nd + sleep 2 ++ run_mongos 'new Date().getTime() / 1000' myApp:myPass@some-name-mongos.pitr-sharded-15191 mongodb '' --quiet ++ local 'command=new Date().getTime() / 1000' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ cut -d. 
-f1 +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vDItYUWM39 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KaP9NIQrrH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vDItYUWM39 +++ cat /tmp/tmp.KaP9NIQrrH +++ rm /tmp/tmp.vDItYUWM39 /tmp/tmp.KaP9NIQrrH +++ return 0 ++ local client_container=psmdb-client-66f577db5f-fkxp9 ++ kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pzJNBOaCn7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Hd79whuyr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pzJNBOaCn7 ++ cat /tmp/tmp.1Hd79whuyr ++ rm /tmp/tmp.pzJNBOaCn7 /tmp/tmp.1Hd79whuyr ++ return 0 + time_now=1746703178 + check_recovery backup-minio-0 date 1746703178 -2nd some-name + local backup_name=backup-minio-0 + local restore_type=date + local restore_date=1746703178 + local cmp_postfix=-2nd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kMLVQy04sO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Mr0PX0lnJ8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kMLVQy04sO +++ cat /tmp/tmp.Mr0PX0lnJ8 +++ rm /tmp/tmp.kMLVQy04sO /tmp/tmp.Mr0PX0lnJ8 +++ return 0 ++ echo 1746703140 + local latest_ts=1746703140 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-15191 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pcWKSQjxRE +++ mktemp ++ local LAST_ERR=/tmp/tmp.6MlvSNq6lJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pcWKSQjxRE ++ cat /tmp/tmp.6MlvSNq6lJ ++ rm /tmp/tmp.pcWKSQjxRE /tmp/tmp.6MlvSNq6lJ ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vcjzMmuDHs ++ mktemp + local LAST_ERR=/tmp/tmp.tQMFNwWSRX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vcjzMmuDHs Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("7573543f-afaa-4d11-b027-b9ba6a07e680") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tQMFNwWSRX + rm /tmp/tmp.vcjzMmuDHs /tmp/tmp.tQMFNwWSRX + return 0 + [[ -n 1746703178 ]] ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + desc 'Restoring to time 2025-05-08 11:19:38' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2025-05-08 11:19:38 ----------------------------------------------------------------------------------- + retries=0 + [[ 1746703140 -gt 1746703178 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FOeHeLrztb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qsYLkc9EnH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FOeHeLrztb +++ cat /tmp/tmp.qsYLkc9EnH +++ rm /tmp/tmp.FOeHeLrztb /tmp/tmp.qsYLkc9EnH +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=1 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ONOFcoZnc3 ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.rr1lZ56K32 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ONOFcoZnc3 +++ cat /tmp/tmp.rr1lZ56K32 +++ rm /tmp/tmp.ONOFcoZnc3 /tmp/tmp.rr1lZ56K32 +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=2 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xV2gwKy8eL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fG5jJyWsNJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xV2gwKy8eL +++ cat /tmp/tmp.fG5jJyWsNJ +++ rm /tmp/tmp.xV2gwKy8eL /tmp/tmp.fG5jJyWsNJ +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=3 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z8Zb8LyzSL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0SfwSmywlp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.z8Zb8LyzSL +++ cat /tmp/tmp.0SfwSmywlp +++ rm /tmp/tmp.z8Zb8LyzSL /tmp/tmp.0SfwSmywlp +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=4 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 
-gt 1746703178 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zswHNOL5hD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l0AQ6t1NCy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zswHNOL5hD +++ cat /tmp/tmp.l0AQ6t1NCy +++ rm /tmp/tmp.zswHNOL5hD /tmp/tmp.l0AQ6t1NCy +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=5 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.y98KxBSx2c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eR4FBQsN8F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.y98KxBSx2c +++ cat /tmp/tmp.eR4FBQsN8F +++ rm /tmp/tmp.y98KxBSx2c /tmp/tmp.eR4FBQsN8F +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=6 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qSFCozcW0p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bygZEvdRvE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qSFCozcW0p +++ cat /tmp/tmp.bygZEvdRvE +++ rm /tmp/tmp.qSFCozcW0p /tmp/tmp.bygZEvdRvE +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=7 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date 
-d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZwTbNyy6nx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AZx6p2NSA5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZwTbNyy6nx +++ cat /tmp/tmp.AZx6p2NSA5 +++ rm /tmp/tmp.ZwTbNyy6nx /tmp/tmp.AZx6p2NSA5 +++ return 0 ++ echo 1746703140 + latest_ts=1746703140 + retries=8 ++ format_date 1746703140 ++ local timestamp=1746703140 +++ TZ=UTC +++ /usr/bin/date -d@1746703140 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:19:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703140 -gt 1746703178 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Wsji7xsPOQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NRP8pw3WnM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Wsji7xsPOQ +++ cat /tmp/tmp.NRP8pw3WnM +++ rm /tmp/tmp.Wsji7xsPOQ /tmp/tmp.NRP8pw3WnM +++ return 0 ++ echo 1746703260 + latest_ts=1746703260 + retries=9 ++ format_date 1746703260 ++ local timestamp=1746703260 +++ TZ=UTC +++ /usr/bin/date -d@1746703260 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:21:00 ++ format_date 1746703178 ++ local timestamp=1746703178 +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:19:38 + echo 'Waiting for last oplog chunk (2025-05-08 11:21:00) to be greater than restore target (2025-05-08 11:19:38)' Waiting for last oplog chunk (2025-05-08 11:21:00) to be greater than restore target (2025-05-08 11:19:38) + sleep 10 + [[ 1746703260 -gt 1746703178 ]] + '[' -z '' ']' + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-0/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-0/' + /usr/bin/sed -e 's/pitrType:/type: date/' + '[' -z 1746703178 ']' + /usr/bin/sed -e /backupSource/,+8d + kubectl_bin apply -f - ++ format_date 1746703178 ++ local 
timestamp=1746703178 ++ mktemp +++ TZ=UTC +++ /usr/bin/date -d@1746703178 '+%Y-%m-%d %H:%M:%S' + local LAST_OUT=/tmp/tmp.GQgcXrkNSH ++ mktemp ++ echo 2025-05-08 11:19:38 + /usr/bin/sed -e 's/date:/date: 2025-05-08 11:19:38/' + local LAST_ERR=/tmp/tmp.4e27Tw8CX0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GQgcXrkNSH perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.4e27Tw8CX0 + rm /tmp/tmp.GQgcXrkNSH /tmp/tmp.4e27Tw8CX0 + return 0 + wait_restore backup-minio-0 some-name requested 0 900 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=900 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-0 to reach requested state...................................................OK + '[' 0 -eq 1 ']' + echo + wait_restore backup-minio-0 some-name ready 0 1600 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-0 to reach ready state.......OK + '[' 0 -eq 1 ']' + echo + set -o xtrace + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3lEJIzVM92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1tUkUKLQc5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3lEJIzVM92 ++ cat /tmp/tmp.1tUkUKLQc5 ++ rm /tmp/tmp.3lEJIzVM92 /tmp/tmp.1tUkUKLQc5 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZxIh12s7A4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5yhPx2iZMh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZxIh12s7A4 ++ cat /tmp/tmp.5yhPx2iZMh ++ rm /tmp/tmp.ZxIh12s7A4 /tmp/tmp.5yhPx2iZMh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-15191 -2nd + local command=find + local 
uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-15191 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hJV4xzSNDV +++ mktemp ++ local LAST_ERR=/tmp/tmp.8plg75yJ9p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hJV4xzSNDV ++ cat /tmp/tmp.8plg75yJ9p ++ rm /tmp/tmp.hJV4xzSNDV /tmp/tmp.8plg75yJ9p ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.mK9Sopdntl ++ mktemp + local LAST_ERR=/tmp/tmp.UpDBssxj9m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mK9Sopdntl + cat /tmp/tmp.UpDBssxj9m + rm /tmp/tmp.mK9Sopdntl /tmp/tmp.UpDBssxj9m + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.gH7mk8jHRC/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/backup-minio.yml + /usr/bin/sed -e 's/name:/name: backup-minio-1/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OXlhu2evI9 ++ mktemp + local LAST_ERR=/tmp/tmp.ktDNWcYkJw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OXlhu2evI9 perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.ktDNWcYkJw + rm /tmp/tmp.OXlhu2evI9 /tmp/tmp.ktDNWcYkJw + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state................ 
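The restore-by-date request traced earlier in this segment boils down to rendering the restore manifest with sed and applying it, then waiting for the PerconaServerMongoDBRestore to move through the requested and ready states. A minimal sketch, combining the separate sed invocations from the trace into a single call (an equivalence assumption) and reusing the literal values from this run:

# Assumed sketch of the restore-by-date request from the trace above
src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912
restore_date=1746703178
target="$(TZ=UTC /usr/bin/date -d@"${restore_date}" '+%Y-%m-%d %H:%M:%S')"   # 2025-05-08 11:19:38

/usr/bin/sed \
    -e 's/name:/name: restore-backup-minio-0/' \
    -e 's/backupName:/backupName: backup-minio-0/' \
    -e 's/pitrType:/type: date/' \
    -e '/backupSource/,+8d' \
    -e "s/date:/date: ${target}/" \
    "${src_dir}/e2e-tests/pitr-sharded/conf/restore.yml" \
    | kubectl apply -f -

# The test then waits for psmdb-restore/restore-backup-minio-0 to reach the
# requested state and afterwards the ready state (wait_restore in the trace).

Both this wait loop and the restore-to-latest one that follows gate on the newest PITR oplog chunk, read exactly as traced: kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json | jq '.backups.pitrChunks.pitrChunks | last | .range.end', which returns null until the first chunk is written, hence the repeated retries seen earlier in the log.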
+ sleep 5 + check_recovery backup-minio-1 latest '' -3rd some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GteiME7yVK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kTAPArj90t +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GteiME7yVK +++ cat /tmp/tmp.kTAPArj90t +++ rm /tmp/tmp.GteiME7yVK /tmp/tmp.kTAPArj90t +++ return 0 ++ echo 1746703350 + local latest_ts=1746703350 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-15191 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tCEuj63oQ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XiMCMFpdfi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tCEuj63oQ4 ++ cat /tmp/tmp.XiMCMFpdfi ++ rm /tmp/tmp.tCEuj63oQ4 /tmp/tmp.XiMCMFpdfi ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.dyUKQqrIGH ++ mktemp + local LAST_ERR=/tmp/tmp.YtsHV7LzD2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dyUKQqrIGH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3e3cd53b-c5a7-416a-8850-e08c1dc24e12") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.YtsHV7LzD2 + rm /tmp/tmp.dyUKQqrIGH /tmp/tmp.YtsHV7LzD2 + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace 
----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T1rMgA13L8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xYsdidDJqZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T1rMgA13L8 +++ cat /tmp/tmp.xYsdidDJqZ +++ rm /tmp/tmp.T1rMgA13L8 /tmp/tmp.xYsdidDJqZ +++ return 0 ++ echo 1746703350 + local current_ts=1746703350 + retries=0 + [[ 1746703350 -gt 1746703350 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ek7JyqnEkZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rVkXHMIsKA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ek7JyqnEkZ +++ cat /tmp/tmp.rVkXHMIsKA +++ rm /tmp/tmp.ek7JyqnEkZ /tmp/tmp.rVkXHMIsKA +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=1 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YVRt07o576 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9FytnaOjB5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YVRt07o576 +++ cat /tmp/tmp.9FytnaOjB5 +++ rm /tmp/tmp.YVRt07o576 /tmp/tmp.9FytnaOjB5 +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=2 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 
2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Izja93nJpk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EREk2aaL97 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Izja93nJpk +++ cat /tmp/tmp.EREk2aaL97 +++ rm /tmp/tmp.Izja93nJpk /tmp/tmp.EREk2aaL97 +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=3 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TeIAxF8Z0r ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DndWebjkup +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TeIAxF8Z0r +++ cat /tmp/tmp.DndWebjkup +++ rm /tmp/tmp.TeIAxF8Z0r /tmp/tmp.DndWebjkup +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=4 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yiGqZ0Ykdc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.igNAiPERdV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yiGqZ0Ykdc +++ cat /tmp/tmp.igNAiPERdV +++ rm /tmp/tmp.yiGqZ0Ykdc /tmp/tmp.igNAiPERdV +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=5 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ 
/usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8pp0meUi6P ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XWbcBIHtuN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8pp0meUi6P +++ cat /tmp/tmp.XWbcBIHtuN +++ rm /tmp/tmp.8pp0meUi6P /tmp/tmp.XWbcBIHtuN +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=6 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aSMruD5ERU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TTVQLEimYR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aSMruD5ERU +++ cat /tmp/tmp.TTVQLEimYR +++ rm /tmp/tmp.aSMruD5ERU /tmp/tmp.TTVQLEimYR +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=7 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CS9osiTd7f ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6t8KQtPXKn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.CS9osiTd7f +++ cat /tmp/tmp.6t8KQtPXKn +++ rm /tmp/tmp.CS9osiTd7f /tmp/tmp.6t8KQtPXKn +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=8 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4GNCxTeOBa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vr9LQUNaDh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4GNCxTeOBa +++ cat /tmp/tmp.vr9LQUNaDh +++ rm /tmp/tmp.4GNCxTeOBa /tmp/tmp.vr9LQUNaDh +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=9 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s1VCA0fUaG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AmMjqsVQ3I +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s1VCA0fUaG +++ cat /tmp/tmp.AmMjqsVQ3I +++ rm /tmp/tmp.s1VCA0fUaG /tmp/tmp.AmMjqsVQ3I +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=10 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 10 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq 
'.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6S5ryeiV2Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fbI2fCoxTF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6S5ryeiV2Y +++ cat /tmp/tmp.fbI2fCoxTF +++ rm /tmp/tmp.6S5ryeiV2Y /tmp/tmp.fbI2fCoxTF +++ return 0 ++ echo 1746703350 + latest_ts=1746703350 + retries=11 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:22:30) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703350 -gt 1746703350 ]] + [[ 11 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qFD5ei4XBB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Uvac3qDWZ8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qFD5ei4XBB +++ cat /tmp/tmp.Uvac3qDWZ8 +++ rm /tmp/tmp.qFD5ei4XBB /tmp/tmp.Uvac3qDWZ8 +++ return 0 ++ echo 1746703616 + latest_ts=1746703616 + retries=12 ++ format_date 1746703616 ++ local timestamp=1746703616 +++ TZ=UTC +++ /usr/bin/date -d@1746703616 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:26:56 ++ format_date 1746703350 ++ local timestamp=1746703350 +++ TZ=UTC +++ /usr/bin/date -d@1746703350 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-08 11:22:30 + echo 'Waiting for last oplog chunk (2025-05-08 11:26:56) to be 120 seconds older than starting chunk (2025-05-08 11:22:30)' Waiting for last oplog chunk (2025-05-08 11:26:56) to be 120 seconds older than starting chunk (2025-05-08 11:22:30) + sleep 10 + [[ 1746703616 -gt 1746703350 ]] + '[' -z '' ']' + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/conf/restore.yml + /usr/bin/sed -e /backupSource/,+8d + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/bin/sed -e 's/pitrType:/type: latest/' + /usr/bin/sed -e 's/name:/name: restore-backup-minio-1/' + kubectl_bin apply -f - + '[' -z '' ']' + /usr/bin/sed -e /date:/d ++ mktemp + local LAST_OUT=/tmp/tmp.GDOtYPSBCs ++ mktemp + local LAST_ERR=/tmp/tmp.jChX1DjNyZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GDOtYPSBCs perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.jChX1DjNyZ + rm /tmp/tmp.GDOtYPSBCs /tmp/tmp.jChX1DjNyZ + return 0 + wait_restore backup-minio-1 
some-name requested 0 900 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=900 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-1 to reach requested state.......................................................OK + '[' 0 -eq 1 ']' + echo + wait_restore backup-minio-1 some-name ready 0 1600 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-1 to reach ready state.........OK + '[' 0 -eq 1 ']' + echo + set -o xtrace + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f9T20ltZV1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8a6Bvy50iG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f9T20ltZV1 ++ cat /tmp/tmp.8a6Bvy50iG ++ rm /tmp/tmp.f9T20ltZV1 /tmp/tmp.8a6Bvy50iG ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KSe7iJAbYh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ks2z7jeJeH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KSe7iJAbYh ++ cat /tmp/tmp.Ks2z7jeJeH ++ rm /tmp/tmp.KSe7iJAbYh /tmp/tmp.Ks2z7jeJeH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-15191 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-15191 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-15191 + local driver=mongodb + local 
suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dIkzX8VzWA +++ mktemp ++ local LAST_ERR=/tmp/tmp.NXIaLpCqWJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dIkzX8VzWA ++ cat /tmp/tmp.NXIaLpCqWJ ++ rm /tmp/tmp.dIkzX8VzWA /tmp/tmp.NXIaLpCqWJ ++ return 0 + local client_container=psmdb-client-66f577db5f-fkxp9 + kubectl_bin exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.yTfULk7hfT ++ mktemp + local LAST_ERR=/tmp/tmp.Imqjpuzgyb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-fkxp9 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-15191.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yTfULk7hfT + cat /tmp/tmp.Imqjpuzgyb + rm /tmp/tmp.yTfULk7hfT /tmp/tmp.Imqjpuzgyb + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/pitr-sharded/compare/find-3rd.json /tmp/tmp.gH7mk8jHRC/find-3rd + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LgBe64CzAr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QSZUaadBzC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LgBe64CzAr +++ cat /tmp/tmp.QSZUaadBzC +++ rm /tmp/tmp.LgBe64CzAr /tmp/tmp.QSZUaadBzC +++ return 0 ++ first_timestamp=1746703698 ++ sleep 5 ++ [[ 1746703698 != '' ]] ++ [[ 1746703698 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EVJYXra26k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PchvnN8PRc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EVJYXra26k +++ cat /tmp/tmp.PchvnN8PRc +++ rm /tmp/tmp.EVJYXra26k /tmp/tmp.PchvnN8PRc +++ return 0 ++ second_timestamp=1746703698 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1746703698 != '' ]] ++ [[ 1746703698 != \n\u\l\l ]] ++ [[ 1746703698 == 1746703698 ]] ++ /usr/bin/date -u -d @1746703698 +%Y-%m-%dT%H:%M:%SZ + 
latest_restorable_time=2025-05-08T11:28:18Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PxQqWl114o ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bUoGLW9kBF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PxQqWl114o +++ cat /tmp/tmp.bUoGLW9kBF +++ rm /tmp/tmp.PxQqWl114o /tmp/tmp.bUoGLW9kBF +++ return 0 ++ latestRestorableTime=2025-05-08T11:28:18Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-05-08T11:28:18Z != '' ]] ++ [[ 2025-05-08T11:28:18Z != \n\u\l\l ]] ++ echo 2025-05-08T11:28:18Z + backup_time=2025-05-08T11:28:18Z + [[ 2025-05-08T11:28:18Z != \2\0\2\5\-\0\5\-\0\8\T\1\1\:\2\8\:\1\8\Z ]] + desc 'delete custom RuntimeClass' + set +o xtrace ----------------------------------------------------------------------------------- delete custom RuntimeClass ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.XhA0pOMub1 ++ mktemp + local LAST_ERR=/tmp/tmp.8uzu5GdGW9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XhA0pOMub1 runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.8uzu5GdGW9 + rm /tmp/tmp.XhA0pOMub1 /tmp/tmp.8uzu5GdGW9 + return 0 + destroy pitr-sharded-15191 + local namespace=pitr-sharded-15191 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.WJigOSAo3i ++ mktemp + local LAST_ERR=/tmp/tmp.vlmetjvsrS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WJigOSAo3i customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.vlmetjvsrS + rm /tmp/tmp.WJigOSAo3i /tmp/tmp.vlmetjvsrS + return 0 ++ yq eval .metadata.name 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.pE0GHmVcNM ++ mktemp + local LAST_ERR=/tmp/tmp.CsnWKXLqgS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pE0GHmVcNM + cat /tmp/tmp.CsnWKXLqgS + rm /tmp/tmp.pE0GHmVcNM /tmp/tmp.CsnWKXLqgS + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UMUMcMTbZh ++ mktemp + local LAST_ERR=/tmp/tmp.g4ojCjficx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UMUMcMTbZh + cat /tmp/tmp.g4ojCjficx + rm /tmp/tmp.UMUMcMTbZh /tmp/tmp.g4ojCjficx + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XofK7LKQ2m ++ mktemp + local LAST_ERR=/tmp/tmp.xl2XwyGatI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XofK7LKQ2m + cat /tmp/tmp.xl2XwyGatI + rm /tmp/tmp.XofK7LKQ2m /tmp/tmp.xl2XwyGatI + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + 
kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.KfQJM2rqqj ++ mktemp + local LAST_ERR=/tmp/tmp.pnBWfYtJmU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KfQJM2rqqj clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.pnBWfYtJmU + rm /tmp/tmp.KfQJM2rqqj /tmp/tmp.pnBWfYtJmU + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.GsSfh8Q5K3 ++ mktemp + local LAST_ERR=/tmp/tmp.JoB9n8X3ap + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GsSfh8Q5K3 + cat /tmp/tmp.JoB9n8X3ap Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GsSfh8Q5K3 + cat /tmp/tmp.JoB9n8X3ap Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GsSfh8Q5K3 + cat /tmp/tmp.JoB9n8X3ap Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.GsSfh8Q5K3 + cat /tmp/tmp.JoB9n8X3ap Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io 
"cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.GsSfh8Q5K3 /tmp/tmp.JoB9n8X3ap + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-sharded-15191 + rm -rf /tmp/tmp.gH7mk8jHRC + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.91oMMXcv0m + local LAST_OUT=/tmp/tmp.exZB2MhbDk + desc 'test passed' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.SIc5BVsioL + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.NWnwGYHlnK + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ 
seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-sharded-15191 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
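
The kubectl_bin helper that produces the mktemp, LAST_OUT/LAST_ERR, and seq 0 2 lines throughout this trace is not included in the excerpt. A minimal sketch of the retry pattern the trace implies is below; the function body, variable names, and the exact sleep formula are assumptions inferred from the log (the observed sleeps are 4s and then 8s), not the test suite's verbatim source.

    # Sketch of the retry wrapper suggested by the trace (assumed, not verbatim).
    # Runs a kubectl command up to 3 times, capturing stdout/stderr to temp files
    # and sleeping a little longer after each failed attempt; the per-attempt cat
    # of the error file is what makes the NotFound dumps repeat in the log.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * (i + 1)))   # assumed backoff: 4s, then 8s
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }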
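
The tail of the log is the final teardown: the cert-manager manifest delete is allowed to fail (cert-manager was never installed in this run, so every object in the manifest reports NotFound and the caller swallows the non-zero exit via the "+ true" step), after which the test and operator namespaces are force-deleted. A condensed sketch of that sequence, reconstructed from the trace and using the namespace names from this run, could look like this; backgrounding the two namespace deletes is an assumption based on their interleaved output.

    # Condensed teardown sketch (reconstructed from the trace, not verbatim from the suite).
    # Tolerate the NotFound errors from an absent cert-manager install, mirroring '+ true'.
    kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml || true

    # Force-delete the test and operator namespaces; assumed to run concurrently,
    # which would explain the interleaved mktemp/seq output above.
    kubectl_bin delete --grace-period=0 --force=true namespace pitr-sharded-15191 &
    kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator &
    wait

kubectl delete also accepts --ignore-not-found, which would suppress the NotFound noise entirely; tolerating the exit code achieves the same cleanup result here.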