Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/logs/self-healing-chaos.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + cluster=some-name-rs0 + main + create_infra self-healing-chaos-30994 + local ns=self-healing-chaos-30994 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.B3NDWlvHQe ++ mktemp + local LAST_ERR=/tmp/tmp.VQtXgfzbcY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B3NDWlvHQe customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.VQtXgfzbcY + rm /tmp/tmp.B3NDWlvHQe /tmp/tmp.VQtXgfzbcY + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eUyCQ6dl9N ++ mktemp + local LAST_ERR=/tmp/tmp.2S3iqzSWlE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eUyCQ6dl9N + cat /tmp/tmp.2S3iqzSWlE + rm /tmp/tmp.eUyCQ6dl9N /tmp/tmp.2S3iqzSWlE + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the
server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EQYYx3CODt ++ mktemp + local LAST_ERR=/tmp/tmp.Pu4qN8MzNs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EQYYx3CODt + cat /tmp/tmp.Pu4qN8MzNs + rm /tmp/tmp.EQYYx3CODt /tmp/tmp.Pu4qN8MzNs + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.nHXpfEj8NC ++ mktemp + local LAST_ERR=/tmp/tmp.zvtuIPBcqZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nHXpfEj8NC + cat /tmp/tmp.zvtuIPBcqZ + rm /tmp/tmp.nHXpfEj8NC /tmp/tmp.zvtuIPBcqZ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.zAvNNpdaOR ++ mktemp + local LAST_ERR=/tmp/tmp.H878Xqip3S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zAvNNpdaOR clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.H878Xqip3S + rm /tmp/tmp.zAvNNpdaOR /tmp/tmp.H878Xqip3S + return 0 + check_crd_for_deletion PR-1855-bb3d32e4 + local git_tag=PR-1855-bb3d32e4 ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1855-bb3d32e4/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SPKgECUjho +++ mktemp ++ local LAST_ERR=/tmp/tmp.9h3yEvGdKu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SPKgECUjho ++ cat /tmp/tmp.9h3yEvGdKu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 
0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SPKgECUjho ++ cat /tmp/tmp.9h3yEvGdKu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SPKgECUjho ++ cat /tmp/tmp.9h3yEvGdKu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.SPKgECUjho ++ cat /tmp/tmp.9h3yEvGdKu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.SPKgECUjho /tmp/tmp.9h3yEvGdKu ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o 
custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete networkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete 
podnetworkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted 
customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.DRLX9esZ1L ++ mktemp + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.d9wmv2VoQZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.BAGWIYkzEd ++ mktemp + local LAST_ERR=/tmp/tmp.rtsz48WaFZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BAGWIYkzEd + cat /tmp/tmp.rtsz48WaFZ + rm /tmp/tmp.BAGWIYkzEd /tmp/tmp.rtsz48WaFZ + return 0 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "self-healing-chaos-3494" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DRLX9esZ1L namespace "psmdb-operator" deleted + cat /tmp/tmp.d9wmv2VoQZ + rm /tmp/tmp.DRLX9esZ1L /tmp/tmp.d9wmv2VoQZ + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mmKai8SnMN ++ mktemp + 
local LAST_ERR=/tmp/tmp.WhZKn6w60G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mmKai8SnMN + cat /tmp/tmp.WhZKn6w60G + rm /tmp/tmp.mmKai8SnMN /tmp/tmp.WhZKn6w60G + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.cROUFzy3jz ++ mktemp + local LAST_ERR=/tmp/tmp.LQJOQSOdit + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cROUFzy3jz namespace/psmdb-operator created + cat /tmp/tmp.LQJOQSOdit + rm /tmp/tmp.cROUFzy3jz /tmp/tmp.LQJOQSOdit + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jez46UBN9K +++ mktemp ++ local LAST_ERR=/tmp/tmp.IccyJbUH7B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Jez46UBN9K ++ cat /tmp/tmp.IccyJbUH7B ++ rm /tmp/tmp.Jez46UBN9K /tmp/tmp.IccyJbUH7B ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.T9fu6WQirs ++ mktemp + local LAST_ERR=/tmp/tmp.X0XOXIEAjL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T9fu6WQirs Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13" modified. 
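
# ------------------------------------------------------------------------------
# Editor's note: every kubectl call in this log runs through the suite's
# kubectl_bin retry wrapper, whose trace (mktemp, LAST_OUT/LAST_ERR, seq 0 2,
# sleep 0/4/8) repeats throughout. A minimal sketch reconstructed from that
# trace; the real wrapper also tests an extra guard ('-a -n 1' above) that is
# simplified away here, so treat this as an approximation, not verbatim source:
kubectl_bin() {
	# capture stdout/stderr of each attempt in throw-away temp files
	local LAST_OUT LAST_ERR exit_status=0 timeout=4
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" != 0 ]; then
			# failed attempt: show what we got, back off 0s/4s/8s, retry
			cat "$LAST_OUT"
			cat "$LAST_ERR"
			sleep $((timeout * i))
		else
			break
		fi
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return $exit_status
}
# ------------------------------------------------------------------------------
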
+ cat /tmp/tmp.X0XOXIEAjL + rm /tmp/tmp.T9fu6WQirs /tmp/tmp.X0XOXIEAjL + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.h9WY9eQnZ2 ++ mktemp + local LAST_ERR=/tmp/tmp.Lr9NObTKd6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h9WY9eQnZ2 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.Lr9NObTKd6 + rm /tmp/tmp.h9WY9eQnZ2 /tmp/tmp.Lr9NObTKd6 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vXM66XrBHb ++ mktemp + local LAST_ERR=/tmp/tmp.C9KqLMmbuF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vXM66XrBHb clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.C9KqLMmbuF + rm /tmp/tmp.vXM66XrBHb /tmp/tmp.C9KqLMmbuF + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hXcX27ZFu0 ++ mktemp + local LAST_ERR=/tmp/tmp.AsIHsd7vJv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hXcX27ZFu0 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.AsIHsd7vJv + rm /tmp/tmp.hXcX27ZFu0 /tmp/tmp.AsIHsd7vJv + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1a8j6jNRsm +++ mktemp ++ local LAST_ERR=/tmp/tmp.SwLdgKzzwB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1a8j6jNRsm ++ cat /tmp/tmp.SwLdgKzzwB ++ rm /tmp/tmp.1a8j6jNRsm /tmp/tmp.SwLdgKzzwB ++ return 0 + wait_pod percona-server-mongodb-operator-79f5d4dcd6-9xqv4 + local pod=percona-server-mongodb-operator-79f5d4dcd6-9xqv4 + set +o xtrace waiting for pod/percona-server-mongodb-operator-79f5d4dcd6-9xqv4 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.dS9xI0zM3B +++ mktemp ++ local LAST_ERR=/tmp/tmp.VWKfO9vm7a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dS9xI0zM3B ++ cat /tmp/tmp.VWKfO9vm7a ++ rm /tmp/tmp.dS9xI0zM3B /tmp/tmp.VWKfO9vm7a ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-79f5d4dcd6-9xqv4 ++ mktemp + local LAST_OUT=/tmp/tmp.JBJ5kHr8OI ++ mktemp + local LAST_ERR=/tmp/tmp.SceIUY1VJA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-79f5d4dcd6-9xqv4 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JBJ5kHr8OI + cat /tmp/tmp.SceIUY1VJA + rm /tmp/tmp.JBJ5kHr8OI /tmp/tmp.SceIUY1VJA + return 0 2025-05-28T17:47:48.866Z INFO setup Manager starting up {"gitCommit": "bb3d32e498b3a92c096b809e60928f73eda8778f", "gitBranch": "PR-1855-bb3d32e4", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace self-healing-chaos-30994 + local namespace=self-healing-chaos-30994 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were 
provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces self-healing-chaos-30994' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces self-healing-chaos-30994 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace self-healing-chaos-30994 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.lS66Dn0kaA + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.wXBsqhNK6x ++ mktemp + local LAST_ERR=/tmp/tmp.SyLc4jXJRZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + awk '{print$1}' + xargs kubectl delete ns ++ mktemp + local LAST_ERR=/tmp/tmp.wivvvbFOsq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace self-healing-chaos-30994 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wXBsqhNK6x + cat /tmp/tmp.SyLc4jXJRZ + rm /tmp/tmp.wXBsqhNK6x /tmp/tmp.SyLc4jXJRZ + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lS66Dn0kaA + cat /tmp/tmp.wivvvbFOsq + rm /tmp/tmp.lS66Dn0kaA /tmp/tmp.wivvvbFOsq + return 0 + kubectl_bin wait --for=delete namespace self-healing-chaos-30994 ++ mktemp + local LAST_OUT=/tmp/tmp.JsB8pKv8YJ ++ mktemp + local LAST_ERR=/tmp/tmp.9jmII3r7lz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace self-healing-chaos-30994 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JsB8pKv8YJ + cat /tmp/tmp.9jmII3r7lz + rm /tmp/tmp.JsB8pKv8YJ /tmp/tmp.9jmII3r7lz + return 0 + desc 'create namespace self-healing-chaos-30994' + set +o xtrace ----------------------------------------------------------------------------------- create namespace self-healing-chaos-30994 ----------------------------------------------------------------------------------- + kubectl_bin create namespace self-healing-chaos-30994 ++ mktemp 
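
# ------------------------------------------------------------------------------
# Editor's note: create_namespace, traced above for psmdb-operator and here for
# the test namespace, boils down to a delete / wait / create / switch-context
# cycle. The equivalent standalone commands, with the namespace name from this
# run (the context name is cluster-specific):
ns=self-healing-chaos-30994
kubectl delete namespace "$ns" --ignore-not-found
kubectl wait --for=delete namespace "$ns"
kubectl create namespace "$ns"
kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
# ------------------------------------------------------------------------------
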
+ local LAST_OUT=/tmp/tmp.xdLQg45ZCf ++ mktemp + local LAST_ERR=/tmp/tmp.S2u2Z7orkj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace self-healing-chaos-30994 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xdLQg45ZCf namespace/self-healing-chaos-30994 created + cat /tmp/tmp.S2u2Z7orkj + rm /tmp/tmp.xdLQg45ZCf /tmp/tmp.S2u2Z7orkj + return 0 + set_kube_ctx self-healing-chaos-30994 + local namespace=self-healing-chaos-30994 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KCNrAadV5B +++ mktemp ++ local LAST_ERR=/tmp/tmp.mL5cmvEWkm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KCNrAadV5B ++ cat /tmp/tmp.mL5cmvEWkm ++ rm /tmp/tmp.KCNrAadV5B /tmp/tmp.mL5cmvEWkm ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13 --namespace=self-healing-chaos-30994 ++ mktemp + local LAST_OUT=/tmp/tmp.50HE8xEHA2 ++ mktemp + local LAST_ERR=/tmp/tmp.hgYLVh6d5k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13 --namespace=self-healing-chaos-30994 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.50HE8xEHA2 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster13" modified. + cat /tmp/tmp.hgYLVh6d5k + rm /tmp/tmp.50HE8xEHA2 /tmp/tmp.hgYLVh6d5k + return 0 + deploy_chaos_mesh self-healing-chaos-30994 + local chaos_mesh_ns=self-healing-chaos-30994 + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh 
----------------------------------------------------------------------------------- + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" already exists with the same configuration, skipping + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=self-healing-chaos-30994 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.7.1 NAME: chaos-mesh LAST DEPLOYED: Wed May 28 17:48:43 2025 NAMESPACE: self-healing-chaos-30994 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. Make sure chaos-mesh components are running kubectl get pods --namespace self-healing-chaos-30994 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + setup_cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.QEQwU6rCcq ++ mktemp + local LAST_ERR=/tmp/tmp.TmBQHUxJ6U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QEQwU6rCcq secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.TmBQHUxJ6U + rm /tmp/tmp.QEQwU6rCcq /tmp/tmp.TmBQHUxJ6U + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.jonHTEmahZ + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + local LAST_ERR=/tmp/tmp.J473lM3COn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jonHTEmahZ perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.J473lM3COn + rm /tmp/tmp.jonHTEmahZ /tmp/tmp.J473lM3COn + return 0 + wait_for_running some-name-rs0 3 + 
local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hLDUzp3Mk9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uuV0cIDPaj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hLDUzp3Mk9 ++ cat /tmp/tmp.uuV0cIDPaj ++ rm /tmp/tmp.hLDUzp3Mk9 /tmp/tmp.uuV0cIDPaj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vZ26Hd442H +++ mktemp ++ local LAST_ERR=/tmp/tmp.B3FvklsdBg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vZ26Hd442H ++ cat /tmp/tmp.B3FvklsdBg ++ rm /tmp/tmp.vZ26Hd442H /tmp/tmp.B3FvklsdBg ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cNCcULNMYk +++ mktemp ++ local LAST_ERR=/tmp/tmp.lIMw31Ajc4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cNCcULNMYk ++ cat /tmp/tmp.lIMw31Ajc4 ++ rm /tmp/tmp.cNCcULNMYk /tmp/tmp.lIMw31Ajc4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'enable backups' + set +o xtrace ----------------------------------------------------------------------------------- enable backups ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.GnnoNdy4ai ++ mktemp + local LAST_ERR=/tmp/tmp.6LjJd2PPC8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GnnoNdy4ai perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.6LjJd2PPC8 + rm /tmp/tmp.GnnoNdy4ai /tmp/tmp.6LjJd2PPC8 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 
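
# ------------------------------------------------------------------------------
# Editor's note: wait_for_running, traced above and below, decides which extra
# pods (arbiter / non-voting / hidden) to wait for by probing the replset spec
# with jsonpath, and the backup toggle is a plain merge patch. The two
# one-liners are lifted from the trace; the loop wrapper is an editorial
# paraphrase:
for member in arbiter non_voting hidden; do
	kubectl get psmdb some-name \
		-o "jsonpath={.spec.replsets[?(@.name==\"rs0\")].${member}.enabled}"
done
kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}'
# ------------------------------------------------------------------------------
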
+ for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hyYQr07eXj +++ mktemp ++ local LAST_ERR=/tmp/tmp.WZiQE5NYUc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hyYQr07eXj ++ cat /tmp/tmp.WZiQE5NYUc ++ rm /tmp/tmp.hyYQr07eXj /tmp/tmp.WZiQE5NYUc ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7EgLlKHm8g +++ mktemp ++ local LAST_ERR=/tmp/tmp.mSdc5YN2ly ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7EgLlKHm8g ++ cat /tmp/tmp.mSdc5YN2ly ++ rm /tmp/tmp.7EgLlKHm8g /tmp/tmp.mSdc5YN2ly ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OIGAH8C3VK +++ mktemp ++ local LAST_ERR=/tmp/tmp.qxOViJo4v8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OIGAH8C3VK ++ cat /tmp/tmp.qxOViJo4v8 ++ rm /tmp/tmp.OIGAH8C3VK /tmp/tmp.qxOViJo4v8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................................. 
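
# ------------------------------------------------------------------------------
# Editor's note: the user-creation and data checks that follow all run the
# legacy mongo shell inside the psmdb-client pod. A condensed sketch of the
# write path, using the same pod lookup, URI and test-fixture credentials
# (myApp/myPass from secrets.yml) that appear in the trace:
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
uri='mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false&replicaSet=rs0'
kubectl exec "$client" -- bash -c "printf 'use myApp\n db.test.insert({ x: 100500 })\n' | mongo '$uri'"
# ------------------------------------------------------------------------------
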
+ desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-30994 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-30994 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kZnC5nCqIY +++ mktemp ++ local LAST_ERR=/tmp/tmp.PMGZDjXn9F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kZnC5nCqIY ++ cat /tmp/tmp.PMGZDjXn9F ++ rm /tmp/tmp.kZnC5nCqIY /tmp/tmp.PMGZDjXn9F ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RFKBYm8d8l ++ mktemp + local LAST_ERR=/tmp/tmp.OwB3bSSIZn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RFKBYm8d8l Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ca92fc0a-120d-420e-b52f-9d3f0649391c") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.OwB3bSSIZn + rm /tmp/tmp.RFKBYm8d8l /tmp/tmp.OwB3bSSIZn + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2D1iPiz0Zk +++ mktemp ++ local LAST_ERR=/tmp/tmp.gzP2MoZFk3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2D1iPiz0Zk ++ cat /tmp/tmp.gzP2MoZFk3 ++ rm /tmp/tmp.2D1iPiz0Zk /tmp/tmp.gzP2MoZFk3 ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3Up4VMgdwt ++ mktemp + local LAST_ERR=/tmp/tmp.IO6mQ1CGRE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Up4VMgdwt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1f3d4928-8404-48c2-8ecf-14426afc7e18") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.IO6mQ1CGRE + rm /tmp/tmp.3Up4VMgdwt /tmp/tmp.IO6mQ1CGRE + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:52:57+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rLuk9JizJk +++ mktemp ++ local LAST_ERR=/tmp/tmp.P93MuTtKcC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rLuk9JizJk ++ cat /tmp/tmp.P93MuTtKcC ++ rm /tmp/tmp.rLuk9JizJk /tmp/tmp.P93MuTtKcC ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jB7Kll0pDz ++ mktemp + local LAST_ERR=/tmp/tmp.FacHM2ZN9t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jB7Kll0pDz + cat /tmp/tmp.FacHM2ZN9t + rm /tmp/tmp.jB7Kll0pDz /tmp/tmp.FacHM2ZN9t + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:53:01+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.iU10u5WNsK +++ mktemp ++ local LAST_ERR=/tmp/tmp.L7ETRWvAPE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iU10u5WNsK ++ cat /tmp/tmp.L7ETRWvAPE ++ rm /tmp/tmp.iU10u5WNsK /tmp/tmp.L7ETRWvAPE ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IOZV8a4ray ++ mktemp + local LAST_ERR=/tmp/tmp.OptKDryyAl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IOZV8a4ray + cat /tmp/tmp.OptKDryyAl + rm /tmp/tmp.IOZV8a4ray /tmp/tmp.OptKDryyAl + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:53:04+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1yLBgY1fPF +++ mktemp ++ local LAST_ERR=/tmp/tmp.tJliXqr65w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1yLBgY1fPF ++ cat /tmp/tmp.tJliXqr65w ++ rm /tmp/tmp.1yLBgY1fPF /tmp/tmp.tJliXqr65w ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.sU4tWwusGk ++ mktemp + local LAST_ERR=/tmp/tmp.XFBcZZv2rn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sU4tWwusGk + cat /tmp/tmp.XFBcZZv2rn + rm /tmp/tmp.sU4tWwusGk /tmp/tmp.XFBcZZv2rn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + desc 'recreate cluster' + set +o xtrace ----------------------------------------------------------------------------------- recreate cluster ----------------------------------------------------------------------------------- + recreate + desc 'delete PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dHo8c3dQ1e ++ mktemp + local LAST_ERR=/tmp/tmp.DOQK6GqJsK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dHo8c3dQ1e perconaservermongodb.psmdb.percona.com "some-name" deleted + cat /tmp/tmp.DOQK6GqJsK + rm /tmp/tmp.dHo8c3dQ1e /tmp/tmp.DOQK6GqJsK + return 0 + wait_for_delete 
pod/some-name-rs0-2 + local res=pod/some-name-rs0-2 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-2 to be deleted........Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found + wait_for_delete pod/some-name-rs0-1 + local res=pod/some-name-rs0-1 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-1 to be deletedError from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found + wait_for_delete pod/some-name-rs0-0 + local res=pod/some-name-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-0 to be deletedError from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found + desc 'create PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.6jZZL8dxmQ + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4"' ++ mktemp + local LAST_ERR=/tmp/tmp.QVvZvQzlBq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6jZZL8dxmQ perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.QVvZvQzlBq + rm /tmp/tmp.6jZZL8dxmQ /tmp/tmp.QVvZvQzlBq + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ba0uNDerxI +++ mktemp ++ local LAST_ERR=/tmp/tmp.hXP4GIBt51 ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ba0uNDerxI ++ cat /tmp/tmp.hXP4GIBt51 ++ rm /tmp/tmp.ba0uNDerxI /tmp/tmp.hXP4GIBt51 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KwjuvkEevy +++ mktemp ++ local LAST_ERR=/tmp/tmp.fOtlvuwQzJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KwjuvkEevy ++ cat /tmp/tmp.fOtlvuwQzJ ++ rm /tmp/tmp.KwjuvkEevy /tmp/tmp.fOtlvuwQzJ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DIvsRGne2l +++ mktemp ++ local LAST_ERR=/tmp/tmp.lIDuXL8Dxc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DIvsRGne2l ++ cat /tmp/tmp.lIDuXL8Dxc ++ rm /tmp/tmp.DIvsRGne2l /tmp/tmp.lIDuXL8Dxc ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i8GiDlK1xF +++ mktemp ++ local LAST_ERR=/tmp/tmp.TtkJ0yKbUS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i8GiDlK1xF ++ cat /tmp/tmp.TtkJ0yKbUS ++ rm /tmp/tmp.i8GiDlK1xF /tmp/tmp.TtkJ0yKbUS ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:55:48+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uaSrgfOLfq +++ mktemp ++ local LAST_ERR=/tmp/tmp.IOCgCxvZhE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uaSrgfOLfq ++ cat /tmp/tmp.IOCgCxvZhE ++ rm /tmp/tmp.uaSrgfOLfq /tmp/tmp.IOCgCxvZhE ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gFsCZCqhp6 ++ mktemp + local LAST_ERR=/tmp/tmp.FOk3uEr7Xo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gFsCZCqhp6 + cat /tmp/tmp.FOk3uEr7Xo + rm /tmp/tmp.gFsCZCqhp6 /tmp/tmp.FOk3uEr7Xo + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:55:51+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zuU5pYdzhB +++ mktemp ++ local LAST_ERR=/tmp/tmp.WKkdbjp1Dw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zuU5pYdzhB ++ cat /tmp/tmp.WKkdbjp1Dw ++ rm /tmp/tmp.zuU5pYdzhB /tmp/tmp.WKkdbjp1Dw ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ws09r0yin8 ++ mktemp + local LAST_ERR=/tmp/tmp.1mwIrOThaB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ws09r0yin8 + cat /tmp/tmp.1mwIrOThaB + rm /tmp/tmp.ws09r0yin8 /tmp/tmp.1mwIrOThaB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:55:54+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZAmGRPM2fh +++ mktemp ++ local LAST_ERR=/tmp/tmp.CwISLlCIHa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZAmGRPM2fh ++ cat /tmp/tmp.CwISLlCIHa ++ rm /tmp/tmp.ZAmGRPM2fh /tmp/tmp.CwISLlCIHa ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.td7cQ4ccr0 ++ mktemp + local LAST_ERR=/tmp/tmp.lIEQcvPTpy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.td7cQ4ccr0 + cat /tmp/tmp.lIEQcvPTpy + rm /tmp/tmp.td7cQ4ccr0 /tmp/tmp.lIEQcvPTpy + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + desc 'kill node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- kill node-0 pod ----------------------------------------------------------------------------------- + kill_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=1748454880631407002 + yq eval ' .metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-30994[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/chaos-pod-kill.yml + kubectl apply -f - podchaos.chaos-mesh.org/chaos-cluster-pod-kill created + sleep 5 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be 
ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YmGogRI8Uu +++ mktemp ++ local LAST_ERR=/tmp/tmp.NvRofNZnLq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YmGogRI8Uu ++ cat /tmp/tmp.NvRofNZnLq ++ rm /tmp/tmp.YmGogRI8Uu /tmp/tmp.NvRofNZnLq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bkjHVfY8If +++ mktemp ++ local LAST_ERR=/tmp/tmp.h13ZKluBHT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bkjHVfY8If ++ cat /tmp/tmp.h13ZKluBHT ++ rm /tmp/tmp.bkjHVfY8If /tmp/tmp.h13ZKluBHT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C2B9XXy0jz +++ mktemp ++ local LAST_ERR=/tmp/tmp.NwF6i1oVMn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C2B9XXy0jz ++ cat /tmp/tmp.NwF6i1oVMn ++ rm /tmp/tmp.C2B9XXy0jz /tmp/tmp.NwF6i1oVMn ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h9sIZ2Pqhk +++ mktemp ++ local LAST_ERR=/tmp/tmp.14Es06C1yf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h9sIZ2Pqhk ++ cat /tmp/tmp.14Es06C1yf ++ rm /tmp/tmp.h9sIZ2Pqhk /tmp/tmp.14Es06C1yf ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + check_pod_restarted some-name-rs0-0 1748454880631407002 + local pod=some-name-rs0-0 + local old_resourceVersion=1748454880631407002 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local new_resourceVersion=1748454989531583002 + [[ 1748454880631407002 == \1\7\4\8\4\5\4\9\8\9\5\3\1\5\8\3\0\0\2 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:56:56+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p7tK0lospJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.gq4sz506oo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p7tK0lospJ ++ cat /tmp/tmp.gq4sz506oo ++ rm /tmp/tmp.p7tK0lospJ /tmp/tmp.gq4sz506oo ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8fNYWkqGIX ++ mktemp + local LAST_ERR=/tmp/tmp.oIZFW26DIn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8fNYWkqGIX + cat /tmp/tmp.oIZFW26DIn + rm /tmp/tmp.8fNYWkqGIX /tmp/tmp.oIZFW26DIn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.Q15JaBzEYp/find + desc 'fail node-0 pod for 60s' + set +o xtrace ----------------------------------------------------------------------------------- fail node-0 pod for 60s ----------------------------------------------------------------------------------- + failure_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ tr -d '"' ++ jq .metadata.resourceVersion + local old_resourceVersion=1748454989531583002 + yq eval ' .metadata.name = "chaos-cluster-pod-failure" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-30994[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/chaos-pod-failure.yml + kubectl apply -f - podchaos.chaos-mesh.org/chaos-cluster-pod-failure created + sleep 10 + desc 'write data' + set +o xtrace 
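The pod-failure injection applied just above follows the same chaos-mesh pattern as the earlier pod-kill step: record the Pod's metadata.resourceVersion, apply a PodChaos object scoped to exactly one pod (the suite builds it by yq-patching e2e-tests/conf/chaos-pod-failure.yml), and later assert that the version changed, i.e. the operator recreated the Pod. A minimal sketch of that pattern, reconstructed from this trace; only the yq-patched selector fields, the pod-failure action, and the 60s duration are visible in the log, so the rest of the manifest body is an assumption based on chaos-mesh's v1alpha1 API:

# 1) remember the pod's revision before injecting chaos
old_rv=$(kubectl get pod some-name-rs0-0 -o json | jq -r .metadata.resourceVersion)

# 2) apply a PodChaos scoped to that single pod (assumed manifest shape)
cat <<'EOF' | kubectl apply -f -
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-cluster-pod-failure
spec:
  action: pod-failure
  mode: one
  duration: 60s
  selector:
    pods:
      self-healing-chaos-30994:
        - some-name-rs0-0
EOF

# 3) once the cluster reports ready again, a changed resourceVersion
#    proves the operator recreated the pod (what check_pod_restarted does)
new_rv=$(kubectl get pod some-name-rs0-0 -o json | jq -r .metadata.resourceVersion)
[ "$old_rv" != "$new_rv" ] && echo "pod some-name-rs0-0 was restarted"

The later network_loss step uses the same selector shape with a NetworkChaos object (chaos-cluster-network-loss). The trace resumes below with the 'write data' step, performed while the pod is failed to confirm the replica set still accepts writes.
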
----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2iTRro8OLh +++ mktemp ++ local LAST_ERR=/tmp/tmp.wNCBnlsa7B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2iTRro8OLh ++ cat /tmp/tmp.wNCBnlsa7B ++ rm /tmp/tmp.2iTRro8OLh /tmp/tmp.wNCBnlsa7B ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jUvL3kbO8w ++ mktemp + local LAST_ERR=/tmp/tmp.dG2SX6mfQN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jUvL3kbO8w Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d7748e8b-8232-4906-877e-e1e0a8508757") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.dG2SX6mfQN + rm /tmp/tmp.jUvL3kbO8w /tmp/tmp.dG2SX6mfQN + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.05KR5k22Qt +++ mktemp ++ local LAST_ERR=/tmp/tmp.ll7K73FZKv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb 
some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.05KR5k22Qt ++ cat /tmp/tmp.ll7K73FZKv ++ rm /tmp/tmp.05KR5k22Qt /tmp/tmp.ll7K73FZKv ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0OTimWMvIo +++ mktemp ++ local LAST_ERR=/tmp/tmp.brCX2j3Mfa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0OTimWMvIo ++ cat /tmp/tmp.brCX2j3Mfa ++ rm /tmp/tmp.0OTimWMvIo /tmp/tmp.brCX2j3Mfa ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HHFPlKQgF5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NmZ5dbBzIS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HHFPlKQgF5 ++ cat /tmp/tmp.NmZ5dbBzIS ++ rm /tmp/tmp.HHFPlKQgF5 /tmp/tmp.NmZ5dbBzIS ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................. + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GNJofL76T4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dHZgzXTDgt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GNJofL76T4 ++ cat /tmp/tmp.dHZgzXTDgt ++ rm /tmp/tmp.GNJofL76T4 /tmp/tmp.dHZgzXTDgt ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + check_pod_restarted some-name-rs0-0 1748454989531583002 + local pod=some-name-rs0-0 + local old_resourceVersion=1748454989531583002 ++ kubectl get pod some-name-rs0-0 -ojson ++ tr -d '"' ++ jq .metadata.resourceVersion + local new_resourceVersion=1748455094519855002 + [[ 1748454989531583002 == \1\7\4\8\4\5\5\0\9\4\5\1\9\8\5\5\0\0\2 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T17:58:26+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WIws0aUaOH +++ mktemp ++ local LAST_ERR=/tmp/tmp.bzmzy7z6d3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WIws0aUaOH ++ cat /tmp/tmp.bzmzy7z6d3 ++ rm /tmp/tmp.WIws0aUaOH /tmp/tmp.bzmzy7z6d3 ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cAPCr7R6H5 ++ mktemp + local LAST_ERR=/tmp/tmp.8p7dUAIa7x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cAPCr7R6H5 + cat /tmp/tmp.8p7dUAIa7x + rm /tmp/tmp.cAPCr7R6H5 /tmp/tmp.8p7dUAIa7x + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find-2nd.json /tmp/tmp.Q15JaBzEYp/find-2nd + desc 'emulate bad network node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- emulate bad network node-0 pod ----------------------------------------------------------------------------------- + network_loss some-name-rs0-0 + local pod=some-name-rs0-0 + yq eval ' .metadata.name = "chaos-cluster-network-loss" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-30994[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/chaos-network-loss.yml + kubectl apply -f - networkchaos.chaos-mesh.org/chaos-cluster-network-loss created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-30994 + local 
driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0e4Ar0encq +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sr4n6hUk8v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0e4Ar0encq ++ cat /tmp/tmp.Sr4n6hUk8v ++ rm /tmp/tmp.0e4Ar0encq /tmp/tmp.Sr4n6hUk8v ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.F1dSzvztcV ++ mktemp + local LAST_ERR=/tmp/tmp.IJcjBP8IbS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.F1dSzvztcV Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-30994.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a4bee312-7730-4799-b781-d0e2e1204a3e") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.IJcjBP8IbS + rm /tmp/tmp.F1dSzvztcV /tmp/tmp.IJcjBP8IbS + return 0 + sleep 60 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OiG21FT7uh +++ mktemp ++ local LAST_ERR=/tmp/tmp.wgVR5A6LJo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OiG21FT7uh ++ cat /tmp/tmp.wgVR5A6LJo ++ rm /tmp/tmp.OiG21FT7uh /tmp/tmp.wgVR5A6LJo ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin 
get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.usNerBFDc7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dg7FrPLEZD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.usNerBFDc7 ++ cat /tmp/tmp.dg7FrPLEZD ++ rm /tmp/tmp.usNerBFDc7 /tmp/tmp.dg7FrPLEZD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ktJLetl1db +++ mktemp ++ local LAST_ERR=/tmp/tmp.4cPmjUW06T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ktJLetl1db ++ cat /tmp/tmp.4cPmjUW06T ++ rm /tmp/tmp.ktJLetl1db /tmp/tmp.4cPmjUW06T ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SzQzgHszMc +++ mktemp ++ local LAST_ERR=/tmp/tmp.lynmDIkoim ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SzQzgHszMc ++ cat /tmp/tmp.lynmDIkoim ++ rm /tmp/tmp.SzQzgHszMc /tmp/tmp.lynmDIkoim ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-28T18:00:10+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.cCq5WrL2hD +++ mktemp ++ local LAST_ERR=/tmp/tmp.t8RefpUYfM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cCq5WrL2hD ++ cat /tmp/tmp.t8RefpUYfM ++ rm /tmp/tmp.cCq5WrL2hD /tmp/tmp.t8RefpUYfM ++ return 0 + local client_container=psmdb-client-66f577db5f-dd49m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GVeLec49wF ++ mktemp + local LAST_ERR=/tmp/tmp.JWttXFyksD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-dd49m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-30994.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GVeLec49wF + cat /tmp/tmp.JWttXFyksD + rm /tmp/tmp.GVeLec49wF /tmp/tmp.JWttXFyksD + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/self-healing-chaos/compare/find-3rd.json /tmp/tmp.Q15JaBzEYp/find-3rd + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns=self-healing-chaos-30994 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-30994 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-30994 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: 
resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-30994' + local kind=NetworkChaos ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-30994' ++ awk '{print $2}' + local name=chaos-cluster-network-loss ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-30994' ++ awk '{print $3}' + local namespace=self-healing-chaos-30994 + kubectl patch NetworkChaos 
chaos-cluster-network-loss -n self-healing-chaos-30994 --type=merge -p '{"metadata":{"finalizers":[]}}' networkchaos.chaos-mesh.org/chaos-cluster-network-loss patched + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces networkchaos.chaos-mesh.org "chaos-cluster-network-loss" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-30994' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-30994' ++ awk '{print $2}' + local name=chaos-cluster-pod-failure ++ awk '{print $3}' ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-30994' + local namespace=self-healing-chaos-30994 + kubectl patch PodChaos chaos-cluster-pod-failure -n self-healing-chaos-30994 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-failure patched + read -r line ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30994' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30994' ++ awk '{print $2}' + local name=chaos-cluster-pod-kill ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30994' ++ awk '{print $3}' + local namespace=self-healing-chaos-30994 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-30994 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-failure" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-30994' ++ awk '{print $1}' + local kind=PodNetworkChaos ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-30994' ++ awk 
'{print $2}' + local name=some-name-rs0-0 ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-30994' ++ awk '{print $3}' + local namespace=self-healing-chaos-30994 + kubectl patch PodNetworkChaos some-name-rs0-0 -n self-healing-chaos-30994 --type=merge -p '{"metadata":{"finalizers":[]}}' podnetworkchaos.chaos-mesh.org/some-name-rs0-0 patched (no change) + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces podnetworkchaos.chaos-mesh.org "some-name-rs0-0" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io 
"azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + destroy self-healing-chaos-30994 + local namespace=self-healing-chaos-30994 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.8WTmTd2qxj +++ mktemp ++ local LAST_ERR=/tmp/tmp.RxBcGcWuXd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8WTmTd2qxj ++ cat /tmp/tmp.RxBcGcWuXd No resources found in self-healing-chaos-30994 namespace. 
+ destroy self-healing-chaos-30994
+ local namespace=self-healing-chaos-30994
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8WTmTd2qxj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.RxBcGcWuXd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8WTmTd2qxj
++ cat /tmp/tmp.RxBcGcWuXd
No resources found in self-healing-chaos-30994 namespace.
++ rm /tmp/tmp.8WTmTd2qxj /tmp/tmp.RxBcGcWuXd
++ return 0
+ '[' 0 '!=' 0 ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.lH9nzpdIi9
++ mktemp
+ local LAST_ERR=/tmp/tmp.J5XYU3Awgo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lH9nzpdIi9
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.J5XYU3Awgo
+ rm /tmp/tmp.lH9nzpdIi9 /tmp/tmp.J5XYU3Awgo
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.5sWCA2KCOt
++ mktemp
+ local LAST_ERR=/tmp/tmp.IITDH6apWA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.5sWCA2KCOt
+ cat /tmp/tmp.IITDH6apWA
+ rm /tmp/tmp.5sWCA2KCOt /tmp/tmp.IITDH6apWA
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.sAEg46xZIi
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZamK9sdqWq
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.sAEg46xZIi
+ cat /tmp/tmp.ZamK9sdqWq
+ rm /tmp/tmp.sAEg46xZIi /tmp/tmp.ZamK9sdqWq
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.fniLqC0TQv
++ mktemp
+ local LAST_ERR=/tmp/tmp.lOy0tDfHEa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fniLqC0TQv
+ cat /tmp/tmp.lOy0tDfHEa
+ rm /tmp/tmp.fniLqC0TQv /tmp/tmp.lOy0tDfHEa
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.24xpKW47u9
++ mktemp
+ local LAST_ERR=/tmp/tmp.rtou7qKbs3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.24xpKW47u9
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.rtou7qKbs3
+ rm /tmp/tmp.24xpKW47u9 /tmp/tmp.rtou7qKbs3
+ return 0
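
Each `delete_crd` iteration above applies the same recipe per CRD named in deploy/crd.yaml: list instances across all namespaces, blank their finalizers via an inline `sh -xc` (where `$0` receives the namespace column and `$1` the name column), tolerate the "server doesn't have a resource type" error with `:`, then block until the CRD itself is gone. A condensed sketch reconstructed from the trace; treat it as illustrative rather than the script's verbatim source:

    # Reconstructed sketch of the per-CRD cleanup recipe in the trace.
    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
        kubectl get "$crd_name" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc 'kubectl patch '"$crd_name"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
            || :
        kubectl wait --for=delete crd "$crd_name"
    done
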
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.2zCZ5dEeNy
++ mktemp
+ local LAST_ERR=/tmp/tmp.3LglRsDdY5
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.2zCZ5dEeNy
+ cat /tmp/tmp.3LglRsDdY5
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.2zCZ5dEeNy
+ cat /tmp/tmp.3LglRsDdY5
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.2zCZ5dEeNy
+ cat /tmp/tmp.3LglRsDdY5
+ sleep 8
+ cat /tmp/tmp.2zCZ5dEeNy
+ cat /tmp/tmp.3LglRsDdY5
+ rm /tmp/tmp.2zCZ5dEeNy /tmp/tmp.3LglRsDdY5
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace self-healing-chaos-30994
+ rm -rf /tmp/tmp.Q15JaBzEYp
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.5JcStwBzX6
++ mktemp
+ local LAST_ERR=/tmp/tmp.bojPbKr42m
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace self-healing-chaos-30994
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ mktemp
+ local LAST_OUT=/tmp/tmp.KyGB3HxP0p
++ mktemp
+ local LAST_ERR=/tmp/tmp.SZObc7Y8uT
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
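
The failed `destroy_cert_manager` call above also documents the `kubectl_bin` wrapper's retry contract: three attempts (`seq 0 2`), stdout/stderr captured to mktemp files and cat'ed on every failure, a growing back-off (`sleep 0`, `sleep 4`, `sleep 8`, i.e. `timeout * i` with `timeout=4`), and `return 1` once attempts are exhausted; the `+ true` that follows suggests the caller deliberately tolerates the failure. A sketch of that wrapper reconstructed only from this trace; the real e2e helper may differ in detail:

    # Reconstructed from the trace: retry kubectl up to three times with a
    # 0/4/8 second back-off, capturing output to temp files; print the
    # captured output and return the last exit status. Not verbatim source.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }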