Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/logs/self-healing-chaos.log
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
+ cluster=some-name-rs0
+ main
+ create_infra self-healing-chaos-891
+ local ns=self-healing-chaos-891
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.1vHkcXwYnN
++ mktemp
+ local LAST_ERR=/tmp/tmp.YRomPfakHI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1vHkcXwYnN
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.YRomPfakHI
+ rm /tmp/tmp.1vHkcXwYnN /tmp/tmp.YRomPfakHI
+ return 0
++ grep -v '\-\-\-'
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.AkW8cDBZnz
++ mktemp
+ local LAST_ERR=/tmp/tmp.8ephLCbM2U
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AkW8cDBZnz
+ cat /tmp/tmp.8ephLCbM2U
+ rm /tmp/tmp.AkW8cDBZnz /tmp/tmp.8ephLCbM2U
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.9D4NLCAxXw ++ mktemp + local LAST_ERR=/tmp/tmp.EkVYxhTiP3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9D4NLCAxXw + cat /tmp/tmp.EkVYxhTiP3 + rm /tmp/tmp.9D4NLCAxXw /tmp/tmp.EkVYxhTiP3 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0UovRHA8Wu ++ mktemp + local LAST_ERR=/tmp/tmp.2C81B09rqd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0UovRHA8Wu + cat /tmp/tmp.2C81B09rqd + rm /tmp/tmp.0UovRHA8Wu /tmp/tmp.2C81B09rqd + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.7il6PfiAZw ++ mktemp + local LAST_ERR=/tmp/tmp.QucANul7SH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7il6PfiAZw clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.QucANul7SH + rm /tmp/tmp.7il6PfiAZw /tmp/tmp.QucANul7SH + return 0 + check_crd_for_deletion PR-1912-ab1be45a + local git_tag=PR-1912-ab1be45a ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1912-ab1be45a/deploy/crd.yaml ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j5AhUema02 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rrc29e0WwD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.j5AhUema02 ++ cat /tmp/tmp.Rrc29e0WwD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ 
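
The xargs/patch pattern traced above exists because a CRD cannot finish deleting while its custom resources still carry finalizers and the operator that would remove them is already gone. A minimal sketch of the same idea for one of the three CRDs, mirroring the commands in the trace but using a read loop instead of xargs (NotFound errors are tolerated, as in the log):

# Strip finalizers from every PerconaServerMongoDBBackup so CRD deletion
# cannot hang on objects whose controller no longer exists, then wait for
# the CRD itself to disappear.
crd=perconaservermongodbbackups.psmdb.percona.com
kubectl get "$crd" --all-namespaces --no-headers \
    -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name 2>/dev/null |
while read -r ns name; do
    kubectl patch "$crd" "$name" -n "$ns" \
        --type=merge -p '{"metadata":{"finalizers":[]}}'
done
kubectl wait --for=delete crd "$crd" || :
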
for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.j5AhUema02 ++ cat /tmp/tmp.Rrc29e0WwD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.j5AhUema02 ++ cat /tmp/tmp.Rrc29e0WwD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.j5AhUema02 ++ cat /tmp/tmp.Rrc29e0WwD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.j5AhUema02 /tmp/tmp.Rrc29e0WwD ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns=self-healing-chaos-2819 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-2819 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-2819 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | 
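
Every kubectl call in this log goes through a kubectl_bin wrapper; the mktemp/seq/sleep pattern visible in the crd/null retries above (sleep 0, then 4, then 8) implies roughly the helper below. This is a reconstruction from the trace, not the suite's exact source:

# Run kubectl up to three times, capturing stdout/stderr to temp files and
# backing off 0s/4s/8s between attempts, as the trace shows.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))   # 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
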
grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete networkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace ++ awk '{print $1}' ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-2819' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-2819' + local name=chaos-cluster-pod-failure ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-2819' ++ awk '{print $3}' + local namespace=self-healing-chaos-2819 + kubectl patch PodChaos chaos-cluster-pod-failure -n self-healing-chaos-2819 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-failure patched + read -r line ++ awk '{print $1}' ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-2819' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-2819' + local name=chaos-cluster-pod-kill ++ awk '{print $3}' ++ echo 'PodChaos 
chaos-cluster-pod-kill self-healing-chaos-2819' + local namespace=self-healing-chaos-2819 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-2819 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-failure" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete 
workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old 
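
destroy_chaos_mesh walks every chaos-mesh.org API type, clears finalizers on any leftover objects (the two PodChaos resources above), deletes the objects, and finally drops the CRDs and cluster-scoped RBAC. Condensed into one loop under the same assumptions as the trace; note this uses kubectl's --api-group filter rather than the log's grep over `kubectl api-resources`, which is equivalent for these types:

# Sweep all chaos-mesh.org resource types: unblock deletion by clearing
# finalizers, delete the objects, then remove the CRDs themselves.
for kind in $(kubectl api-resources --api-group=chaos-mesh.org -o name); do
    kubectl get "$kind" --all-namespaces --no-headers \
        -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name 2>/dev/null |
    while read -r ns name; do
        kubectl patch "$kind" "$name" -n "$ns" \
            --type=merge -p '{"metadata":{"finalizers":[]}}'
    done
    timeout 30 kubectl delete "$kind" --all --all-namespaces
done
kubectl get crd -o name | grep chaos-mesh.org | xargs -r timeout 30 kubectl delete
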
namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.5ACIBuGmLj + xargs kubectl delete ns ++ mktemp + local LAST_ERR=/tmp/tmp.xkq4pA5PJk + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + local LAST_OUT=/tmp/tmp.hlZxHs5XwF + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.EbuHkbBq0P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5ACIBuGmLj + cat /tmp/tmp.xkq4pA5PJk + rm /tmp/tmp.5ACIBuGmLj /tmp/tmp.xkq4pA5PJk + return 0 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "self-healing-chaos-2819" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hlZxHs5XwF namespace "psmdb-operator" deleted + cat /tmp/tmp.EbuHkbBq0P + rm /tmp/tmp.hlZxHs5XwF /tmp/tmp.EbuHkbBq0P + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.9YmMEZG41R ++ mktemp + local LAST_ERR=/tmp/tmp.7x11xjoHFh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9YmMEZG41R + cat /tmp/tmp.7x11xjoHFh + rm /tmp/tmp.9YmMEZG41R /tmp/tmp.7x11xjoHFh + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yYZdXRlB1p ++ mktemp + local LAST_ERR=/tmp/tmp.6aq0Gl1Bet + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yYZdXRlB1p namespace/psmdb-operator created + cat /tmp/tmp.6aq0Gl1Bet + rm /tmp/tmp.yYZdXRlB1p /tmp/tmp.6aq0Gl1Bet + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nmac2qa3K6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.f3zeXFlOj5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nmac2qa3K6 ++ cat /tmp/tmp.f3zeXFlOj5 ++ rm /tmp/tmp.nmac2qa3K6 /tmp/tmp.f3zeXFlOj5 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dm7bqNZAOm ++ mktemp + local LAST_ERR=/tmp/tmp.ZPVfGgolTp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dm7bqNZAOm Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4" modified. 
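
The namespace dance traced above (delete, wait for deletion, recreate, repoint the kube context) reduces to four commands; the wait step matters because creating a namespace that is still Terminating fails:

kubectl delete namespace psmdb-operator --ignore-not-found
kubectl wait --for=delete namespace psmdb-operator
kubectl create namespace psmdb-operator
kubectl config set-context "$(kubectl config current-context)" --namespace=psmdb-operator
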
+ cat /tmp/tmp.ZPVfGgolTp + rm /tmp/tmp.dm7bqNZAOm /tmp/tmp.ZPVfGgolTp + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.LDY8xXcxdp ++ mktemp + local LAST_ERR=/tmp/tmp.LzZbGTMsAS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LDY8xXcxdp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.LzZbGTMsAS + rm /tmp/tmp.LDY8xXcxdp /tmp/tmp.LzZbGTMsAS + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.7gJUmxNCnW ++ mktemp + local LAST_ERR=/tmp/tmp.Ke79JS1m72 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7gJUmxNCnW clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.Ke79JS1m72 + rm /tmp/tmp.7gJUmxNCnW /tmp/tmp.Ke79JS1m72 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Asno69yz1s ++ mktemp + local LAST_ERR=/tmp/tmp.WgMXXRU3Io + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Asno69yz1s deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.WgMXXRU3Io + rm /tmp/tmp.Asno69yz1s /tmp/tmp.WgMXXRU3Io + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.MdjyyOMtYO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ATIigGUJfp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MdjyyOMtYO ++ cat /tmp/tmp.ATIigGUJfp ++ rm /tmp/tmp.MdjyyOMtYO /tmp/tmp.ATIigGUJfp ++ return 0 + wait_pod percona-server-mongodb-operator-5b4f87c7bd-fjt2k + local pod=percona-server-mongodb-operator-5b4f87c7bd-fjt2k + set +o xtrace waiting for pod/percona-server-mongodb-operator-5b4f87c7bd-fjt2k to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.worP4tZmqK +++ mktemp ++ local LAST_ERR=/tmp/tmp.2cigBM7Ukw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.worP4tZmqK ++ cat /tmp/tmp.2cigBM7Ukw ++ rm /tmp/tmp.worP4tZmqK /tmp/tmp.2cigBM7Ukw ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-5b4f87c7bd-fjt2k ++ mktemp + local LAST_OUT=/tmp/tmp.xihX7GlIUy ++ mktemp + local LAST_ERR=/tmp/tmp.LrvIT4ThVt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-5b4f87c7bd-fjt2k + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xihX7GlIUy + cat /tmp/tmp.LrvIT4ThVt + rm /tmp/tmp.xihX7GlIUy /tmp/tmp.LrvIT4ThVt + return 0 2025-05-21T13:12:27.163Z INFO setup Manager starting up {"gitCommit": "ab1be45a35452d5d20912637f506f22dd6dd4c33", "gitBranch": "PR-1912-ab1be45a", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace self-healing-chaos-891 + local namespace=self-healing-chaos-891 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration 
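
deploy_operator, untangled from the interleaved trace: the CRDs go in with server-side apply (--server-side --force-conflicts, which also sidesteps client-side apply's last-applied-configuration annotation size limit on large schemas), the cluster-wide RBAC is rewritten to the operator namespace on the fly, and the operator image plus two env values are pinned with yq v4. The yq expression is the one from the trace; repo-relative paths are shortened here for readability:

kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml |
    kubectl apply -n psmdb-operator -f -
yq eval '
  (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a") |
  ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
  ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
' deploy/cw-operator.yaml | kubectl apply -f -
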
error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces self-healing-chaos-891' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces self-healing-chaos-891 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace self-healing-chaos-891 --ignore-not-found ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.bKfdoDvY7o ++ mktemp + local LAST_ERR=/tmp/tmp.6hKdDcIPI6 + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_OUT=/tmp/tmp.FeP3NMKVNa + for i in '$(seq 0 2)' + set +e ++ mktemp + kubectl delete namespace self-healing-chaos-891 --ignore-not-found + local LAST_ERR=/tmp/tmp.3FFeIm2Un4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FeP3NMKVNa + cat /tmp/tmp.3FFeIm2Un4 + rm /tmp/tmp.FeP3NMKVNa /tmp/tmp.3FFeIm2Un4 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bKfdoDvY7o + cat /tmp/tmp.6hKdDcIPI6 + rm /tmp/tmp.bKfdoDvY7o /tmp/tmp.6hKdDcIPI6 + return 0 + kubectl_bin wait --for=delete namespace self-healing-chaos-891 ++ mktemp + local LAST_OUT=/tmp/tmp.EziQHJChIN ++ mktemp + local LAST_ERR=/tmp/tmp.19pDkXMS9y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace self-healing-chaos-891 namespace "gke-managed-cim" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EziQHJChIN + cat /tmp/tmp.19pDkXMS9y + rm /tmp/tmp.EziQHJChIN /tmp/tmp.19pDkXMS9y + return 0 + desc 'create namespace self-healing-chaos-891' + set +o xtrace ----------------------------------------------------------------------------------- create namespace self-healing-chaos-891 ----------------------------------------------------------------------------------- + kubectl_bin create namespace self-healing-chaos-891 ++ mktemp + local LAST_OUT=/tmp/tmp.J2ET2uYE9x ++ mktemp + local LAST_ERR=/tmp/tmp.sVvctTmw1Y + local 
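
The recurring "error: resource(s) were provided, but no name was specified" lines here and earlier in the teardown are expected noise: on a clean cluster the grep matches nothing, so kubectl delete receives an empty name list and fails, and the trailing `:` no-op (the `+ :` trace lines) absorbs the non-zero status so `set -e` does not abort the run. The pattern, spelled out:

# Fails harmlessly when grep matches nothing; "|| :" keeps set -e happy.
timeout 30 kubectl delete MutatingWebhookConfiguration \
    $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :
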
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace self-healing-chaos-891 namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J2ET2uYE9x namespace/self-healing-chaos-891 created + cat /tmp/tmp.sVvctTmw1Y + rm /tmp/tmp.J2ET2uYE9x /tmp/tmp.sVvctTmw1Y + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.1sjTrtyDYG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ckAmtMixwg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1sjTrtyDYG ++ cat /tmp/tmp.ckAmtMixwg ++ rm /tmp/tmp.1sjTrtyDYG /tmp/tmp.ckAmtMixwg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4 --namespace=self-healing-chaos-891 ++ mktemp + local LAST_OUT=/tmp/tmp.n8exhfLoyK ++ mktemp + local LAST_ERR=/tmp/tmp.Eqm3Wp7gBc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4 --namespace=self-healing-chaos-891 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n8exhfLoyK Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster4" modified. + cat /tmp/tmp.Eqm3Wp7gBc + rm /tmp/tmp.n8exhfLoyK /tmp/tmp.Eqm3Wp7gBc + return 0 + deploy_chaos_mesh self-healing-chaos-891 + local chaos_mesh_ns=self-healing-chaos-891 + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- + helm repo add chaos-mesh https://charts.chaos-mesh.org 
"chaos-mesh" already exists with the same configuration, skipping + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=self-healing-chaos-891 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.7.1 NAME: chaos-mesh LAST DEPLOYED: Wed May 21 13:13:12 2025 NAMESPACE: self-healing-chaos-891 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. Make sure chaos-mesh components are running kubectl get pods --namespace self-healing-chaos-891 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + setup_cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.pi9AHBoFnE ++ mktemp + local LAST_ERR=/tmp/tmp.DCPWMLxpBU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pi9AHBoFnE secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.DCPWMLxpBU + rm /tmp/tmp.pi9AHBoFnE /tmp/tmp.DCPWMLxpBU + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.XF9i8SmkR0 + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.gPSzTrv80Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XF9i8SmkR0 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.gPSzTrv80Y + rm /tmp/tmp.XF9i8SmkR0 /tmp/tmp.gPSzTrv80Y + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local 
cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.megPBCqpqz +++ mktemp ++ local LAST_ERR=/tmp/tmp.a6QlAcwtMN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.megPBCqpqz ++ cat /tmp/tmp.a6QlAcwtMN ++ rm /tmp/tmp.megPBCqpqz /tmp/tmp.a6QlAcwtMN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6eixNNKDn0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bqFMAlc55c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6eixNNKDn0 ++ cat /tmp/tmp.bqFMAlc55c ++ rm /tmp/tmp.6eixNNKDn0 /tmp/tmp.bqFMAlc55c ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'enable backups' + set +o xtrace ----------------------------------------------------------------------------------- enable backups ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.28Mc1LyoRa ++ mktemp + local LAST_ERR=/tmp/tmp.AAYBNAnMjD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.28Mc1LyoRa perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.AAYBNAnMjD + rm /tmp/tmp.28Mc1LyoRa /tmp/tmp.AAYBNAnMjD + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LvHqOuOJAL +++ mktemp ++ local LAST_ERR=/tmp/tmp.p75aTjnWgH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
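
wait_pod prints one dot per poll until the pod reports Ready, producing the "waiting for pod/... to be ready......OK" lines above. A reconstruction consistent with the trace; the suite's real helper may add a timeout:

wait_pod() {
    local pod=$1
    echo -n "waiting for pod/$pod to be ready"
    # Poll the Ready condition until it reports True.
    until [ "$(kubectl get pod "$pod" \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null)" = "True" ]; do
        echo -n .
        sleep 1
    done
    echo .OK
}
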
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LvHqOuOJAL ++ cat /tmp/tmp.p75aTjnWgH ++ rm /tmp/tmp.LvHqOuOJAL /tmp/tmp.p75aTjnWgH ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bj8wFU0RbA +++ mktemp ++ local LAST_ERR=/tmp/tmp.yqcOYZLKdL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bj8wFU0RbA ++ cat /tmp/tmp.yqcOYZLKdL ++ rm /tmp/tmp.Bj8wFU0RbA /tmp/tmp.yqcOYZLKdL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................ + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-891 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-891 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AapAG07HR2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.u1TDlHpzax ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AapAG07HR2 ++ cat /tmp/tmp.u1TDlHpzax ++ rm /tmp/tmp.AapAG07HR2 /tmp/tmp.u1TDlHpzax ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9dilJ66Xyu ++ mktemp + local LAST_ERR=/tmp/tmp.pCLoJZOZxp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9dilJ66Xyu Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4bbf31fe-c6aa-4ca9-b5d8-654adab325d5") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.pCLoJZOZxp + rm /tmp/tmp.9dilJ66Xyu /tmp/tmp.pCLoJZOZxp + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.self-healing-chaos-891 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-891 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fmLSSs2WGp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7IHN1PnO8D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fmLSSs2WGp ++ cat /tmp/tmp.7IHN1PnO8D ++ rm /tmp/tmp.fmLSSs2WGp /tmp/tmp.7IHN1PnO8D ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yBl5El9ADf ++ mktemp + local LAST_ERR=/tmp/tmp.fOH095CKbb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yBl5El9ADf Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7a0411f4-31b1-4ae5-9479-81e076c516b8") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.fOH095CKbb + rm /tmp/tmp.yBl5El9ADf /tmp/tmp.fOH095CKbb + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local command=find + local 
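
run_mongo execs into the psmdb-client deployment and pipes the statement into the legacy mongo shell, which is why every call logs the shell/server version mismatch warning (shell 4.4.29 vs server 7.0.18). The insert above as one standalone command, resolving the client pod via its deployment instead of the hashed pod name:

kubectl exec deploy/psmdb-client -- bash -c \
    'printf "use myApp\n db.test.insert({ x: 100500 })\n" |
     mongo "mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'

The mongodb+srv scheme resolves the replica set members from the headless service's SRV records, so only the service name is needed.
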
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:16+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0p69OfTLsM +++ mktemp ++ local LAST_ERR=/tmp/tmp.15Bp0ifbHX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0p69OfTLsM ++ cat /tmp/tmp.15Bp0ifbHX ++ rm /tmp/tmp.0p69OfTLsM /tmp/tmp.15Bp0ifbHX ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GDYlgZEIEb ++ mktemp + local LAST_ERR=/tmp/tmp.4VFnyrmgBR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GDYlgZEIEb + cat /tmp/tmp.4VFnyrmgBR + rm /tmp/tmp.GDYlgZEIEb /tmp/tmp.4VFnyrmgBR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:19+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yUpYvprhuU +++ mktemp ++ local LAST_ERR=/tmp/tmp.uhipw5eykL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yUpYvprhuU ++ cat /tmp/tmp.uhipw5eykL ++ rm /tmp/tmp.yUpYvprhuU /tmp/tmp.uhipw5eykL ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ujNeFuKbG6 ++ mktemp + local LAST_ERR=/tmp/tmp.Qdc0f4z3Bm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ujNeFuKbG6 + cat /tmp/tmp.Qdc0f4z3Bm + rm /tmp/tmp.ujNeFuKbG6 /tmp/tmp.Qdc0f4z3Bm + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
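
compare_mongo_cmd diffs each member's query output against a golden file, but only after scrubbing nondeterminism: driver chatter is dropped and ObjectIds plus ordinal pod suffixes are masked. A sketch of that filter as a standalone function (normalize_mongo_output is a name invented here, not the suite's; the regexes are verbatim from the trace):

normalize_mongo_output() {
    # Drop driver/session noise, then mask values that change per run.
    egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' |
    sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
}
# usage: run_mongo 'use myApp\n db.test.find()' "$uri" | normalize_mongo_output > /tmp/find
#        diff -u e2e-tests/self-healing-chaos/compare/find.json /tmp/find
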
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:22+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ilubb74sUI +++ mktemp ++ local LAST_ERR=/tmp/tmp.DxRLI3YpmJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ilubb74sUI ++ cat /tmp/tmp.DxRLI3YpmJ ++ rm /tmp/tmp.Ilubb74sUI /tmp/tmp.DxRLI3YpmJ ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.L8bq3juXDS ++ mktemp + local LAST_ERR=/tmp/tmp.XO0agw6Yvy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L8bq3juXDS + cat /tmp/tmp.XO0agw6Yvy + rm /tmp/tmp.L8bq3juXDS /tmp/tmp.XO0agw6Yvy + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + desc 'recreate cluster' + set +o xtrace ----------------------------------------------------------------------------------- recreate cluster ----------------------------------------------------------------------------------- + recreate + desc 'delete PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.XEOxMGHk8Y ++ mktemp + local LAST_ERR=/tmp/tmp.SMKgYIaVib + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XEOxMGHk8Y perconaservermongodb.psmdb.percona.com "some-name" deleted + cat /tmp/tmp.SMKgYIaVib + rm /tmp/tmp.XEOxMGHk8Y /tmp/tmp.SMKgYIaVib + return 0 + wait_for_delete 
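# NOTE: the three compare_mongo_cmd cycles traced above all reduce to the
# pattern sketched below. This is a minimal standalone reconstruction, not the
# harness's actual helper: run the query through the psmdb-client pod, strip
# volatile ObjectIds and pod ordinals, then diff against the checked-in fixture.
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -- bash -c \
    'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false&replicaSet=rs0"' \
    | egrep -v 'I NETWORK|W NETWORK|F NETWORK|connecting to:|Implicit session:|versions do not match' \
    | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' >/tmp/find
diff -u e2e-tests/self-healing-chaos/compare/find.json /tmp/find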
pod/some-name-rs0-2 + local res=pod/some-name-rs0-2 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-2 to be deleted.......Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found + wait_for_delete pod/some-name-rs0-1 + local res=pod/some-name-rs0-1 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-1 to be deletedError from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found + wait_for_delete pod/some-name-rs0-0 + local res=pod/some-name-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-0 to be deletedError from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found + desc 'create PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + local LAST_OUT=/tmp/tmp.p6ndma0w6k + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.797h2CcJBR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p6ndma0w6k perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.797h2CcJBR + rm /tmp/tmp.p6ndma0w6k /tmp/tmp.797h2CcJBR + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5FwPgdURR4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e9m14ypXGB ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5FwPgdURR4 ++ cat /tmp/tmp.e9m14ypXGB ++ rm /tmp/tmp.5FwPgdURR4 /tmp/tmp.e9m14ypXGB ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0QsNkeSjrQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.KqChdHpdXr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0QsNkeSjrQ ++ cat /tmp/tmp.KqChdHpdXr ++ rm /tmp/tmp.0QsNkeSjrQ /tmp/tmp.KqChdHpdXr ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dTWRFY6fXl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lzw5CBpFQr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dTWRFY6fXl ++ cat /tmp/tmp.Lzw5CBpFQr ++ rm /tmp/tmp.dTWRFY6fXl /tmp/tmp.Lzw5CBpFQr ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
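# NOTE: hypothetical condensed form of the wait_cluster_consistency poll traced
# above; the helper simply re-reads .status.state from the psmdb object until
# it reports "ready". The real version also bounds the loop (wait_time=32 in
# the trace), which this sketch omits.
until [[ $(kubectl get psmdb some-name -o 'jsonpath={.status.state}') == "ready" ]]; do
    echo -n .
    sleep 7
done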
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:20:10+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tt8D69RlVu +++ mktemp ++ local LAST_ERR=/tmp/tmp.It3Jt2HZ1K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tt8D69RlVu ++ cat /tmp/tmp.It3Jt2HZ1K ++ rm /tmp/tmp.Tt8D69RlVu /tmp/tmp.It3Jt2HZ1K ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mv2PCQltjj ++ mktemp + local LAST_ERR=/tmp/tmp.HZQxXZ88Sp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mv2PCQltjj + cat /tmp/tmp.HZQxXZ88Sp + rm /tmp/tmp.mv2PCQltjj /tmp/tmp.HZQxXZ88Sp + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:20:12+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dvY5lus7w4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ATz42qTebY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dvY5lus7w4 ++ cat /tmp/tmp.ATz42qTebY ++ rm /tmp/tmp.dvY5lus7w4 /tmp/tmp.ATz42qTebY ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.d8c3SQ0shW ++ mktemp + local LAST_ERR=/tmp/tmp.kUwmf2SR87 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d8c3SQ0shW + cat /tmp/tmp.kUwmf2SR87 + rm /tmp/tmp.d8c3SQ0shW /tmp/tmp.kUwmf2SR87 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:20:14+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r1Tgh1F1De +++ mktemp ++ local LAST_ERR=/tmp/tmp.rOQJCZ1Esf ++ local exit_status=0 ++ local timeout=4 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r1Tgh1F1De ++ cat /tmp/tmp.rOQJCZ1Esf ++ rm /tmp/tmp.r1Tgh1F1De /tmp/tmp.rOQJCZ1Esf ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bG86IEE7ba ++ mktemp + local LAST_ERR=/tmp/tmp.G3tOXJdT42 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bG86IEE7ba + cat /tmp/tmp.G3tOXJdT42 + rm /tmp/tmp.bG86IEE7ba /tmp/tmp.G3tOXJdT42 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + desc 'kill node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- kill node-0 pod ----------------------------------------------------------------------------------- + kill_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=88045 + yq eval ' .metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-891[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/chaos-pod-kill.yml + kubectl apply -f - podchaos.chaos-mesh.org/chaos-cluster-pod-kill created + sleep 5 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.................OK + for i in 
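# NOTE: the PodChaos object applied by kill_pod is not echoed in the trace, so
# this is a plausible reconstruction of what gets submitted. Only the name and
# the pod selector are visible above; the action and mode fields are
# assumptions based on the stock chaos-mesh v1alpha1 PodChaos schema.
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-cluster-pod-kill
spec:
  action: pod-kill
  mode: one
  selector:
    pods:
      self-healing-chaos-891:
        - some-name-rs0-0
EOF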
'$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QsWArRfE11 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eZib8F4Uwg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QsWArRfE11 ++ cat /tmp/tmp.eZib8F4Uwg ++ rm /tmp/tmp.QsWArRfE11 /tmp/tmp.eZib8F4Uwg ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JIW7GsZwaB +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ocr5Gtw99l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JIW7GsZwaB ++ cat /tmp/tmp.Ocr5Gtw99l ++ rm /tmp/tmp.JIW7GsZwaB /tmp/tmp.Ocr5Gtw99l ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JENAruQRGq +++ mktemp ++ local LAST_ERR=/tmp/tmp.rGPV5BBlnk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JENAruQRGq ++ cat /tmp/tmp.rGPV5BBlnk ++ rm /tmp/tmp.JENAruQRGq /tmp/tmp.rGPV5BBlnk ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + check_pod_restarted some-name-rs0-0 88045 + local pod=some-name-rs0-0 + local old_resourceVersion=88045 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local new_resourceVersion=89533 + [[ 88045 == \8\9\5\3\3 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
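# NOTE: sketch of the check_pod_restarted step traced above: the kill is
# considered effective when the pod's metadata.resourceVersion changes
# (88045 -> 89533 here). resourceVersion bumps on any object update, so this
# is a heuristic rather than a strict restart proof.
old=88045    # captured before the chaos was applied
new=$(kubectl get pod some-name-rs0-0 -ojson | jq -r .metadata.resourceVersion)
if [[ "$old" == "$new" ]]; then
    echo "pod some-name-rs0-0 does not appear to have been restarted" >&2
    exit 1
fi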
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:21:22+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YfaF7WzeXw +++ mktemp ++ local LAST_ERR=/tmp/tmp.qG9zEuHPGL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YfaF7WzeXw ++ cat /tmp/tmp.qG9zEuHPGL ++ rm /tmp/tmp.YfaF7WzeXw /tmp/tmp.qG9zEuHPGL ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NE2duJzZqk ++ mktemp + local LAST_ERR=/tmp/tmp.4GZss58gqO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NE2duJzZqk + cat /tmp/tmp.4GZss58gqO + rm /tmp/tmp.NE2duJzZqk /tmp/tmp.4GZss58gqO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.1L0GmDzP2p/find + desc 'fail node-0 pod for 60s' + set +o xtrace ----------------------------------------------------------------------------------- fail node-0 pod for 60s ----------------------------------------------------------------------------------- + failure_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=89533 + kubectl apply -f - + yq eval ' .metadata.name = "chaos-cluster-pod-failure" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-891[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/chaos-pod-failure.yml podchaos.chaos-mesh.org/chaos-cluster-pod-failure created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.self-healing-chaos-891 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local 
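# NOTE: as with the pod-kill object, the chaos-pod-failure manifest is not
# echoed in the trace; a plausible reconstruction follows. The 60s duration is
# inferred from the 'fail node-0 pod for 60s' banner, and the action and mode
# fields are assumptions from the chaos-mesh PodChaos schema.
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-cluster-pod-failure
spec:
  action: pod-failure
  duration: 60s
  mode: one
  selector:
    pods:
      self-healing-chaos-891:
        - some-name-rs0-0
EOF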
uri=myApp:myPass@some-name-rs0.self-healing-chaos-891 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.phdwYt5fjx +++ mktemp ++ local LAST_ERR=/tmp/tmp.pwdMnvwmk1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.phdwYt5fjx ++ cat /tmp/tmp.pwdMnvwmk1 ++ rm /tmp/tmp.phdwYt5fjx /tmp/tmp.pwdMnvwmk1 ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gkuReG68qi ++ mktemp + local LAST_ERR=/tmp/tmp.VBRZgZkuuU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gkuReG68qi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6c8dce15-a643-494a-bf8e-727583e1c3b9") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.VBRZgZkuuU + rm /tmp/tmp.gkuReG68qi /tmp/tmp.VBRZgZkuuU + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JJ5Iuz2Epg +++ mktemp ++ local LAST_ERR=/tmp/tmp.bhTNtxsYcZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JJ5Iuz2Epg ++ cat /tmp/tmp.bhTNtxsYcZ ++ rm /tmp/tmp.JJ5Iuz2Epg /tmp/tmp.bhTNtxsYcZ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for 
pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YhRAZ0NMlP +++ mktemp ++ local LAST_ERR=/tmp/tmp.74Li8tVyUw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YhRAZ0NMlP ++ cat /tmp/tmp.74Li8tVyUw ++ rm /tmp/tmp.YhRAZ0NMlP /tmp/tmp.74Li8tVyUw ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PKjeceku00 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2FNUCOHDOu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PKjeceku00 ++ cat /tmp/tmp.2FNUCOHDOu ++ rm /tmp/tmp.PKjeceku00 /tmp/tmp.2FNUCOHDOu ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + check_pod_restarted some-name-rs0-0 89533 + local pod=some-name-rs0-0 + local old_resourceVersion=89533 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local new_resourceVersion=90736 + [[ 89533 == \9\0\7\3\6 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:22:52+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dc8COER8N6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dazW4R6Stb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dc8COER8N6 ++ cat /tmp/tmp.dazW4R6Stb ++ rm /tmp/tmp.dc8COER8N6 /tmp/tmp.dazW4R6Stb ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ffm97iBnj5 ++ mktemp + local LAST_ERR=/tmp/tmp.V6iRODId1E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ffm97iBnj5 + cat /tmp/tmp.V6iRODId1E + rm /tmp/tmp.ffm97iBnj5 /tmp/tmp.V6iRODId1E + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find-2nd.json /tmp/tmp.1L0GmDzP2p/find-2nd + desc 'emulate bad network node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- emulate bad network node-0 pod ----------------------------------------------------------------------------------- + network_loss some-name-rs0-0 + local pod=some-name-rs0-0 + yq eval ' .metadata.name = "chaos-cluster-network-loss" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-891[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/chaos-network-loss.yml + kubectl apply -f - networkchaos.chaos-mesh.org/chaos-cluster-network-loss created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.self-healing-chaos-891 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-891 + local driver=mongodb+srv + local 
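# NOTE: plausible reconstruction of the NetworkChaos object applied by
# network_loss above. Only the name and pod selector are certain from the
# trace; the action, loss percentage, and duration are assumptions (the
# 'sleep 60' that follows suggests roughly a one-minute fault window).
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-cluster-network-loss
spec:
  action: loss
  mode: one
  selector:
    pods:
      self-healing-chaos-891:
        - some-name-rs0-0
  loss:
    loss: "100"
  duration: 60s
EOF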
suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QdKzsctX2K +++ mktemp ++ local LAST_ERR=/tmp/tmp.AuEhGz9p06 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QdKzsctX2K ++ cat /tmp/tmp.AuEhGz9p06 ++ rm /tmp/tmp.QdKzsctX2K /tmp/tmp.AuEhGz9p06 ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Cg5tro99ZA ++ mktemp + local LAST_ERR=/tmp/tmp.AGPjqID9mF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cg5tro99ZA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-891.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ae586d0d-1ab4-463a-baf0-eea0694b3018") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.AGPjqID9mF + rm /tmp/tmp.Cg5tro99ZA /tmp/tmp.AGPjqID9mF + return 0 + sleep 60 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vlwuvKoga0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sa80YRdXOq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vlwuvKoga0 ++ cat /tmp/tmp.Sa80YRdXOq ++ rm /tmp/tmp.vlwuvKoga0 /tmp/tmp.Sa80YRdXOq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vTMRTkUz2i +++ mktemp ++ local LAST_ERR=/tmp/tmp.cwXplwn2yW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vTMRTkUz2i ++ cat /tmp/tmp.cwXplwn2yW ++ rm /tmp/tmp.vTMRTkUz2i /tmp/tmp.cwXplwn2yW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BpPHOA16di +++ mktemp ++ local LAST_ERR=/tmp/tmp.1wAzWVAwos ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BpPHOA16di ++ cat /tmp/tmp.1wAzWVAwos ++ rm /tmp/tmp.BpPHOA16di /tmp/tmp.1wAzWVAwos ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:24:37+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AbIRFQAvYd +++ mktemp ++ local LAST_ERR=/tmp/tmp.h77A7VPbpY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AbIRFQAvYd ++ cat /tmp/tmp.h77A7VPbpY ++ rm /tmp/tmp.AbIRFQAvYd /tmp/tmp.h77A7VPbpY ++ return 0 + local client_container=psmdb-client-66f577db5f-nw4fh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.z5JbQtz1uz ++ mktemp + local LAST_ERR=/tmp/tmp.JsXSUZb65L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-nw4fh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-891.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z5JbQtz1uz + cat /tmp/tmp.JsXSUZb65L + rm /tmp/tmp.z5JbQtz1uz /tmp/tmp.JsXSUZb65L + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/self-healing-chaos/compare/find-3rd.json /tmp/tmp.1L0GmDzP2p/find-3rd + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns=self-healing-chaos-891 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-891 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-891 release "chaos-mesh" uninstalled ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were 
provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-891' + local kind=NetworkChaos ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-891' ++ awk '{print $2}' + local name=chaos-cluster-network-loss ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-891' ++ awk '{print $3}' + local namespace=self-healing-chaos-891 + kubectl patch NetworkChaos chaos-cluster-network-loss 
-n self-healing-chaos-891 --type=merge -p '{"metadata":{"finalizers":[]}}' networkchaos.chaos-mesh.org/chaos-cluster-network-loss patched + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces networkchaos.chaos-mesh.org "chaos-cluster-network-loss" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-891' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-891' ++ awk '{print $2}' + local name=chaos-cluster-pod-failure ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-891' ++ awk '{print $3}' + local namespace=self-healing-chaos-891 + kubectl patch PodChaos chaos-cluster-pod-failure -n self-healing-chaos-891 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-failure patched + read -r line ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-891' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-891' ++ awk '{print $2}' + local name=chaos-cluster-pod-kill ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-891' ++ awk '{print $3}' + local namespace=self-healing-chaos-891 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-891 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-failure" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-891' ++ awk '{print $1}' + local kind=PodNetworkChaos ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-891' ++ awk '{print $2}' + local name=some-name-rs0-0 ++ echo 
'PodNetworkChaos some-name-rs0-0 self-healing-chaos-891' ++ awk '{print $3}' + local namespace=self-healing-chaos-891 + kubectl patch PodNetworkChaos some-name-rs0-0 -n self-healing-chaos-891 --type=merge -p '{"metadata":{"finalizers":[]}}' podnetworkchaos.chaos-mesh.org/some-name-rs0-0 patched (no change) + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces podnetworkchaos.chaos-mesh.org "some-name-rs0-0" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted 
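# NOTE: generic form of the chaos-mesh teardown loop traced above. Chaos CRs
# can carry finalizers, so each surviving object is patched to an empty
# finalizer list before the bulk delete; without that, 'kubectl delete' can
# hang on resources whose controller has already been uninstalled. A minimal
# sketch of the same logic:
for kind in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
    kubectl get "$kind" --all-namespaces --no-headers \
        -o custom-columns=Name:.metadata.name,NAMESPACE:.metadata.namespace \
        | while read -r name ns; do
            kubectl patch "$kind" "$name" -n "$ns" --type=merge \
                -p '{"metadata":{"finalizers":[]}}'
        done
    timeout 30 kubectl delete "$kind" --all --all-namespaces
done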
customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + destroy self-healing-chaos-891 + local namespace=self-healing-chaos-891 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.SXAp8R5oLd +++ mktemp ++ local LAST_ERR=/tmp/tmp.bJveTyIr8S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SXAp8R5oLd ++ cat /tmp/tmp.bJveTyIr8S No resources found in self-healing-chaos-891 namespace. 
+ destroy self-healing-chaos-891
+ local namespace=self-healing-chaos-891
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.SXAp8R5oLd
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bJveTyIr8S
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.SXAp8R5oLd
++ cat /tmp/tmp.bJveTyIr8S
No resources found in self-healing-chaos-891 namespace.
++ rm /tmp/tmp.SXAp8R5oLd /tmp/tmp.bJveTyIr8S
++ return 0
+ '[' 0 '!=' 0 ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.JAgC974vBu
++ mktemp
+ local LAST_ERR=/tmp/tmp.snMEqmBvvh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JAgC974vBu
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.snMEqmBvvh
+ rm /tmp/tmp.JAgC974vBu /tmp/tmp.snMEqmBvvh
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.RBdXUPLW4D
++ mktemp
+ local LAST_ERR=/tmp/tmp.8aYwIrwbNV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RBdXUPLW4D
+ cat /tmp/tmp.8aYwIrwbNV
+ rm /tmp/tmp.RBdXUPLW4D /tmp/tmp.8aYwIrwbNV
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.TJmFEVY6PG
++ mktemp
+ local LAST_ERR=/tmp/tmp.xwKefRXln9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TJmFEVY6PG
+ cat /tmp/tmp.xwKefRXln9
+ rm /tmp/tmp.TJmFEVY6PG /tmp/tmp.xwKefRXln9
+ return 0
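The per-CRD loop above also shows the finalizer-stripping idiom used so no leftover custom resource can stall CRD removal: list every object of the CRD across namespaces, hand each "NAMESPACE NAME ..." line to xargs, and merge-patch the finalizers away before waiting for the CRD to disappear. A minimal sketch of that idiom (the helper name strip_finalizers is hypothetical):

# Hypothetical helper mirroring the finalizer-stripping idiom in the trace.
strip_finalizers() {
    local crd=$1    # e.g. perconaservermongodbbackups.psmdb.percona.com
    # -o wide prints NAMESPACE NAME and extra columns; xargs -L 1 hands each
    # line to sh, where $0 is the namespace and $1 the object name (extra
    # columns become $2.. and are simply ignored by the command).
    kubectl get "$crd" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
    # Block until the CRD itself is gone; returns immediately if it never existed.
    kubectl wait --for=delete crd "$crd"
}

strip_finalizers perconaservermongodbs.psmdb.percona.com

Note that when the CRD is already gone (as above), kubectl get fails with "the server doesn't have a resource type", the patch is a no-op, and the "|| :" lets the loop continue regardless.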
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.50kbFHq6wP
++ mktemp
+ local LAST_ERR=/tmp/tmp.Z75iAa9Z79
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.50kbFHq6wP
+ cat /tmp/tmp.Z75iAa9Z79
+ rm /tmp/tmp.50kbFHq6wP /tmp/tmp.Z75iAa9Z79
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.fQtViacEVf
++ mktemp
+ local LAST_ERR=/tmp/tmp.fIsoKYEZCI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fQtViacEVf
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.fIsoKYEZCI
+ rm /tmp/tmp.fQtViacEVf /tmp/tmp.fIsoKYEZCI
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.y6URqQqpAZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.IjZKRh37X0
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.y6URqQqpAZ
+ cat /tmp/tmp.IjZKRh37X0
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.y6URqQqpAZ + cat /tmp/tmp.IjZKRh37X0 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.y6URqQqpAZ + cat /tmp/tmp.IjZKRh37X0 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.y6URqQqpAZ /tmp/tmp.IjZKRh37X0 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.1L0GmDzP2p + kubectl_bin delete --grace-period=0 --force=true namespace self-healing-chaos-891 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Z0JO18nwxL + local LAST_OUT=/tmp/tmp.mvSrMdqzwn ++ mktemp + local LAST_ERR=/tmp/tmp.K8JmuPCe0J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace self-healing-chaos-891 ++ mktemp + local LAST_ERR=/tmp/tmp.Gmvo5SbSug + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator