Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/logs/operator-self-healing-chaos.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + cluster=some-name-rs0 + main + create_infra operator-self-healing-chaos-22383 + local ns=operator-self-healing-chaos-22383 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.R1h2YB7mJj ++ mktemp + local LAST_ERR=/tmp/tmp.30DXOgs7mc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R1h2YB7mJj customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.30DXOgs7mc + rm /tmp/tmp.R1h2YB7mJj /tmp/tmp.30DXOgs7mc + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dN9fsqlBJT ++ mktemp + local LAST_ERR=/tmp/tmp.cRiwRyZxxC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dN9fsqlBJT + cat /tmp/tmp.cRiwRyZxxC + rm /tmp/tmp.dN9fsqlBJT /tmp/tmp.cRiwRyZxxC + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.IASA2DNEz2 ++ mktemp + local LAST_ERR=/tmp/tmp.zgZ07AtYy4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IASA2DNEz2 + cat /tmp/tmp.zgZ07AtYy4 + rm /tmp/tmp.IASA2DNEz2 /tmp/tmp.zgZ07AtYy4 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jSSSgKeVtw ++ mktemp + local LAST_ERR=/tmp/tmp.gzeHGIPEne + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jSSSgKeVtw + cat /tmp/tmp.gzeHGIPEne + rm /tmp/tmp.jSSSgKeVtw /tmp/tmp.gzeHGIPEne + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Er671Aac3J ++ mktemp + local LAST_ERR=/tmp/tmp.StUZSsa600 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Er671Aac3J clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.StUZSsa600 + rm /tmp/tmp.Er671Aac3J /tmp/tmp.StUZSsa600 + return 0 + check_crd_for_deletion PR-1993-9d545650 + local git_tag=PR-1993-9d545650 ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1993-9d545650/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B9zUqwXWqi +++ mktemp ++ local LAST_ERR=/tmp/tmp.y6j3LTjQxN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.B9zUqwXWqi ++ cat /tmp/tmp.y6j3LTjQxN Error from server (NotFound): 
customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.B9zUqwXWqi ++ cat /tmp/tmp.y6j3LTjQxN Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.B9zUqwXWqi ++ cat /tmp/tmp.y6j3LTjQxN Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.B9zUqwXWqi ++ cat /tmp/tmp.y6j3LTjQxN Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.B9zUqwXWqi /tmp/tmp.y6j3LTjQxN ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns=psmdb-operator + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n psmdb-operator ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace psmdb-operator release "chaos-mesh" uninstalled ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No 
resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' + local name=chaos-operator-pod-failure ++ awk '{print $3}' ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' + local namespace=psmdb-operator + kubectl patch PodChaos chaos-operator-pod-failure -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-operator-pod-failure patched + read -r line ++ awk '{print $1}' ++ echo 'PodChaos chaos-operator-pod-kill psmdb-operator' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-operator-pod-kill psmdb-operator' + local name=chaos-operator-pod-kill ++ echo 'PodChaos chaos-operator-pod-kill 
psmdb-operator' ++ awk '{print $3}' + local namespace=psmdb-operator + kubectl patch PodChaos chaos-operator-pod-kill -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-operator-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-operator-pod-failure" deleted podchaos.chaos-mesh.org "chaos-operator-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces 
No resources found ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete 
namespace psmdb-operator --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + xargs kubectl delete ns + kubectl_bin get ns + local LAST_OUT=/tmp/tmp.pHjDd1pJh1 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.URsLnjmDgP + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.ZGwhFl5CFE ++ seq 0 2 + for i in '$(seq 0 2)' + set +e ++ mktemp + kubectl delete namespace psmdb-operator --ignore-not-found + local LAST_ERR=/tmp/tmp.OQkStt8kg4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZGwhFl5CFE + cat /tmp/tmp.OQkStt8kg4 + rm /tmp/tmp.ZGwhFl5CFE /tmp/tmp.OQkStt8kg4 + return 0 namespace "operator-self-healing-chaos-8881" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pHjDd1pJh1 namespace "psmdb-operator" deleted + cat /tmp/tmp.URsLnjmDgP + rm /tmp/tmp.pHjDd1pJh1 /tmp/tmp.URsLnjmDgP + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.pDdS6VXekH ++ mktemp + local LAST_ERR=/tmp/tmp.GAsbWflTYF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pDdS6VXekH + cat /tmp/tmp.GAsbWflTYF + rm /tmp/tmp.pDdS6VXekH /tmp/tmp.GAsbWflTYF + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fv1iBgujRm ++ mktemp + local LAST_ERR=/tmp/tmp.zZ9D3D0fKd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fv1iBgujRm namespace/psmdb-operator created + cat /tmp/tmp.zZ9D3D0fKd + rm /tmp/tmp.fv1iBgujRm /tmp/tmp.zZ9D3D0fKd + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.dqaWMR5xK6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sg88qdHV81 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dqaWMR5xK6 ++ cat /tmp/tmp.Sg88qdHV81 ++ rm /tmp/tmp.dqaWMR5xK6 /tmp/tmp.Sg88qdHV81 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.eHx5edcyLA ++ mktemp + local LAST_ERR=/tmp/tmp.RaoMQL7R5y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eHx5edcyLA Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3" modified. 
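The CRD cleanup earlier in this run first requests deletion of the CRDs with --wait=false, then strips finalizers from any leftover custom resources before waiting for each CRD to disappear. A minimal standalone sketch of that pattern, under a hypothetical helper name (wait_crd_gone is not part of the test suite):

wait_crd_gone() {
    local crd_name="$1"
    # Drop finalizers on every remaining CR of this type so the pending CRD deletion cannot hang
    # ($0 is the namespace column, $1 the name column from `kubectl get -o wide`).
    kubectl get "${crd_name}" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc "kubectl patch ${crd_name} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :
    # Block until the CRD itself is gone.
    kubectl wait --for=delete crd "${crd_name}"
}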
+ cat /tmp/tmp.RaoMQL7R5y + rm /tmp/tmp.eHx5edcyLA /tmp/tmp.RaoMQL7R5y + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/operator-self-healing-chaos/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.bDh03S6aFQ ++ mktemp + local LAST_ERR=/tmp/tmp.msYFUICDRg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bDh03S6aFQ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.msYFUICDRg + rm /tmp/tmp.bDh03S6aFQ /tmp/tmp.msYFUICDRg + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Tbz66q43q0 ++ mktemp + local LAST_ERR=/tmp/tmp.Oq0LNHel90 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tbz66q43q0 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.Oq0LNHel90 + rm /tmp/tmp.Tbz66q43q0 /tmp/tmp.Oq0LNHel90 + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1993-9d545650") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.L6iXN8AvRC ++ mktemp + local LAST_ERR=/tmp/tmp.A7afOT0jww + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L6iXN8AvRC deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.A7afOT0jww + rm /tmp/tmp.L6iXN8AvRC /tmp/tmp.A7afOT0jww + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ITZiNZ1Ute +++ mktemp ++ local LAST_ERR=/tmp/tmp.FVHzva10FM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ITZiNZ1Ute ++ cat /tmp/tmp.FVHzva10FM ++ rm /tmp/tmp.ITZiNZ1Ute /tmp/tmp.FVHzva10FM ++ return 0 + wait_pod percona-server-mongodb-operator-7dbb56857b-nzshj + local pod=percona-server-mongodb-operator-7dbb56857b-nzshj + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dbb56857b-nzshj to be ready..OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.FhhvBWHITS +++ mktemp ++ local LAST_ERR=/tmp/tmp.xAvCPZCNuk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FhhvBWHITS ++ cat /tmp/tmp.xAvCPZCNuk ++ rm /tmp/tmp.FhhvBWHITS /tmp/tmp.xAvCPZCNuk ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-7dbb56857b-nzshj ++ mktemp + local LAST_OUT=/tmp/tmp.aT0RuXkZlK ++ mktemp + local LAST_ERR=/tmp/tmp.JdwEvCxXDs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-7dbb56857b-nzshj + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aT0RuXkZlK + cat /tmp/tmp.JdwEvCxXDs + rm /tmp/tmp.aT0RuXkZlK /tmp/tmp.JdwEvCxXDs + return 0 2025-07-04T03:30:32.509Z INFO setup Manager starting up {"gitCommit": "9d5456508def1bb97ffa7da79b450ff41a98b5fb", "gitBranch": "PR-1993-9d545650", "buildTime": "", "goVersion": "go1.24.4", "os": "linux", "arch": "amd64"} + create_namespace operator-self-healing-chaos-22383 + local namespace=operator-self-healing-chaos-22383 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration 
error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces operator-self-healing-chaos-22383' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces operator-self-healing-chaos-22383 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace operator-self-healing-chaos-22383 --ignore-not-found + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_OUT=/tmp/tmp.KJavQ2ToB6 ++ mktemp + local LAST_ERR=/tmp/tmp.6g8LKk2yiK + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_OUT=/tmp/tmp.Wxno4wy7S3 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace operator-self-healing-chaos-22383 --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.GzrSa2MYFK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KJavQ2ToB6 + cat /tmp/tmp.6g8LKk2yiK + rm /tmp/tmp.KJavQ2ToB6 /tmp/tmp.6g8LKk2yiK + return 0 + kubectl_bin wait --for=delete namespace operator-self-healing-chaos-22383 ++ mktemp + local LAST_OUT=/tmp/tmp.qioLGKnS5p ++ mktemp + local LAST_ERR=/tmp/tmp.QADxgsZk14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace operator-self-healing-chaos-22383 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wxno4wy7S3 + cat /tmp/tmp.GzrSa2MYFK + rm /tmp/tmp.Wxno4wy7S3 /tmp/tmp.GzrSa2MYFK + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qioLGKnS5p + cat /tmp/tmp.QADxgsZk14 + rm /tmp/tmp.qioLGKnS5p /tmp/tmp.QADxgsZk14 + return 0 + desc 'create namespace operator-self-healing-chaos-22383' + set +o xtrace ----------------------------------------------------------------------------------- create namespace operator-self-healing-chaos-22383 ----------------------------------------------------------------------------------- + kubectl_bin create namespace operator-self-healing-chaos-22383 ++ mktemp + local 
LAST_OUT=/tmp/tmp.xzAvi7EuA6 ++ mktemp + local LAST_ERR=/tmp/tmp.hAltclAsst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace operator-self-healing-chaos-22383 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xzAvi7EuA6 namespace/operator-self-healing-chaos-22383 created + cat /tmp/tmp.hAltclAsst + rm /tmp/tmp.xzAvi7EuA6 /tmp/tmp.hAltclAsst + return 0 + set_kube_ctx operator-self-healing-chaos-22383 + local namespace=operator-self-healing-chaos-22383 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.LlAGG6VfgL +++ mktemp ++ local LAST_ERR=/tmp/tmp.0t2lxSHFh0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LlAGG6VfgL ++ cat /tmp/tmp.0t2lxSHFh0 ++ rm /tmp/tmp.LlAGG6VfgL /tmp/tmp.0t2lxSHFh0 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3 --namespace=operator-self-healing-chaos-22383 ++ mktemp + local LAST_OUT=/tmp/tmp.cJmEkcdf2T ++ mktemp + local LAST_ERR=/tmp/tmp.7FpoF5dZY3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3 --namespace=operator-self-healing-chaos-22383 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cJmEkcdf2T Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3" modified. + cat /tmp/tmp.7FpoF5dZY3 + rm /tmp/tmp.cJmEkcdf2T /tmp/tmp.7FpoF5dZY3 + return 0 + test_namespace=operator-self-healing-chaos-22383 + '[' -n psmdb-operator ']' + kubectl_bin patch clusterrole percona-server-mongodb-operator --type=json -p '[{"op":"remove","path":"/rules/1"}]' ++ mktemp + local LAST_OUT=/tmp/tmp.gQxshP3c0h ++ mktemp + local LAST_ERR=/tmp/tmp.vdhhHP0uHk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch clusterrole percona-server-mongodb-operator --type=json -p '[{"op":"remove","path":"/rules/1"}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gQxshP3c0h clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator patched + cat /tmp/tmp.vdhhHP0uHk + rm /tmp/tmp.gQxshP3c0h /tmp/tmp.vdhhHP0uHk + return 0 + test_namespace=psmdb-operator + deploy_chaos_mesh psmdb-operator + local chaos_mesh_ns=psmdb-operator + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete 
ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" already exists with the same configuration, skipping + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=psmdb-operator --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.7.1 NAME: chaos-mesh LAST DEPLOYED: Fri Jul 4 03:34:18 2025 NAMESPACE: psmdb-operator STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. Make sure chaos-mesh components are running kubectl get pods --namespace psmdb-operator -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + setup_cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.JpQtj5zWeW ++ mktemp + local LAST_ERR=/tmp/tmp.Qdh3jwLgjp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JpQtj5zWeW secret/some-users created + cat /tmp/tmp.Qdh3jwLgjp + rm /tmp/tmp.JpQtj5zWeW /tmp/tmp.Qdh3jwLgjp + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.C434NB8z5D ++ mktemp + local LAST_ERR=/tmp/tmp.OkCpo2oYyW + local exit_status=0 + local timeout=4 + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1993-9d545650"' ++ seq 0 2 + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + cat 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/some-name-rs0.yml + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C434NB8z5D perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.OkCpo2oYyW + rm /tmp/tmp.C434NB8z5D /tmp/tmp.OkCpo2oYyW + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2ORCFqJ2ep +++ mktemp ++ local LAST_ERR=/tmp/tmp.PYhPmJOPDz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ORCFqJ2ep ++ cat /tmp/tmp.PYhPmJOPDz ++ rm /tmp/tmp.2ORCFqJ2ep /tmp/tmp.PYhPmJOPDz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.52rxLKKrDJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.QEc19X3oiH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.52rxLKKrDJ ++ cat /tmp/tmp.QEc19X3oiH ++ rm /tmp/tmp.52rxLKKrDJ /tmp/tmp.QEc19X3oiH ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Apk60Nz5tC +++ mktemp ++ local LAST_ERR=/tmp/tmp.tKD3KiMrtV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Apk60Nz5tC ++ cat /tmp/tmp.tKD3KiMrtV ++ rm /tmp/tmp.Apk60Nz5tC /tmp/tmp.tKD3KiMrtV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4eTNxTWILT +++ mktemp ++ local LAST_ERR=/tmp/tmp.VafzjOtT7i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4eTNxTWILT ++ cat /tmp/tmp.VafzjOtT7i ++ rm /tmp/tmp.4eTNxTWILT 
/tmp/tmp.VafzjOtT7i ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'kill operator' + set +o xtrace ----------------------------------------------------------------------------------- kill operator ----------------------------------------------------------------------------------- + kill_pod ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.IwIdOu7XdN +++ mktemp ++ local LAST_ERR=/tmp/tmp.l5nFn6wNw7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IwIdOu7XdN ++ cat /tmp/tmp.l5nFn6wNw7 ++ rm /tmp/tmp.IwIdOu7XdN /tmp/tmp.l5nFn6wNw7 ++ return 0 + local init_pod=percona-server-mongodb-operator-7dbb56857b-9vt2h + kubectl apply --namespace psmdb-operator -f - + yq eval ' .metadata.name = "chaos-operator-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.psmdb-operator[0] = "percona-server-mongodb-operator-7dbb56857b-9vt2h"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/chaos-pod-kill.yml podchaos.chaos-mesh.org/chaos-operator-pod-kill created + sleep 10 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.iLaCxSS4ns +++ mktemp ++ local LAST_ERR=/tmp/tmp.bHiNz8qjGi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iLaCxSS4ns ++ cat /tmp/tmp.bHiNz8qjGi ++ rm /tmp/tmp.iLaCxSS4ns /tmp/tmp.bHiNz8qjGi ++ return 0 + '[' percona-server-mongodb-operator-7dbb56857b-9vt2h == percona-server-mongodb-operator-7dbb56857b-mx62k ']' + desc 'check if operator works fine: scale up from 3 to 5' + set +o xtrace ----------------------------------------------------------------------------------- check if operator works fine: scale up from 3 to 5 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]' ++ mktemp + local LAST_OUT=/tmp/tmp.m9pIBpq5Ud ++ mktemp + local LAST_ERR=/tmp/tmp.We9jjCNdDU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m9pIBpq5Ud perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.We9jjCNdDU + rm /tmp/tmp.m9pIBpq5Ud /tmp/tmp.We9jjCNdDU + return 0 + sleep 10 + wait_for_running some-name-rs0 5 + local name=some-name-rs0 + let last_pod=4 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 4 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 4 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 4 ]] + wait_pod some-name-rs0-1 + local 
pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 4 ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 3 -eq 4 ]] + wait_pod some-name-rs0-3 + local pod=some-name-rs0-3 + set +o xtrace waiting for pod/some-name-rs0-3 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 4 -eq 4 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rfr2FWtqos +++ mktemp ++ local LAST_ERR=/tmp/tmp.ps8flkODMs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rfr2FWtqos ++ cat /tmp/tmp.ps8flkODMs ++ rm /tmp/tmp.rfr2FWtqos /tmp/tmp.ps8flkODMs ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-4 + local pod=some-name-rs0-4 + set +o xtrace waiting for pod/some-name-rs0-4 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uP5a33bll0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TWfZFRlLNS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uP5a33bll0 ++ cat /tmp/tmp.TWfZFRlLNS ++ rm /tmp/tmp.uP5a33bll0 /tmp/tmp.TWfZFRlLNS ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VU3VpksZI4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vyxap9jcez ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VU3VpksZI4 ++ cat /tmp/tmp.vyxap9jcez ++ rm /tmp/tmp.VU3VpksZI4 /tmp/tmp.vyxap9jcez ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LIUiJ0uccA +++ mktemp ++ local LAST_ERR=/tmp/tmp.h1adtKUbnq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LIUiJ0uccA ++ cat /tmp/tmp.h1adtKUbnq ++ rm /tmp/tmp.LIUiJ0uccA /tmp/tmp.h1adtKUbnq ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'fail operator pod for 60s' + set +o xtrace ----------------------------------------------------------------------------------- fail operator pod for 60s ----------------------------------------------------------------------------------- + fail_pod ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.upI8j4DPls +++ mktemp ++ local LAST_ERR=/tmp/tmp.1WLyUBuyxu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.upI8j4DPls ++ cat /tmp/tmp.1WLyUBuyxu ++ rm /tmp/tmp.upI8j4DPls /tmp/tmp.1WLyUBuyxu ++ return 0 + local init_pod=percona-server-mongodb-operator-7dbb56857b-mx62k ++ kubectl_bin get pod -n psmdb-operator percona-server-mongodb-operator-7dbb56857b-mx62k '-ojsonpath={.status.containerStatuses[0].restartCount}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ke4TV71PMK +++ mktemp ++ local LAST_ERR=/tmp/tmp.selKuVaGXZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n psmdb-operator percona-server-mongodb-operator-7dbb56857b-mx62k '-ojsonpath={.status.containerStatuses[0].restartCount}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ke4TV71PMK ++ cat /tmp/tmp.selKuVaGXZ ++ rm /tmp/tmp.Ke4TV71PMK /tmp/tmp.selKuVaGXZ ++ return 0 + local restart_count_before=0 + kubectl apply --namespace psmdb-operator -f - + yq eval ' .metadata.name = "chaos-operator-pod-failure" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.psmdb-operator[0] = "percona-server-mongodb-operator-7dbb56857b-mx62k"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/chaos-pod-failure.yml podchaos.chaos-mesh.org/chaos-operator-pod-failure created + sleep 10 + desc 'check if operator works fine: scale down from 5 to 3' + set +o xtrace ----------------------------------------------------------------------------------- check if operator works fine: scale down from 5 to 3 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 3}]' ++ mktemp + local LAST_OUT=/tmp/tmp.MMgGxSTMYk ++ mktemp + local LAST_ERR=/tmp/tmp.iiFrCOruyj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 3}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MMgGxSTMYk perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.iiFrCOruyj + rm /tmp/tmp.MMgGxSTMYk /tmp/tmp.iiFrCOruyj + return 0 + sleep 60 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.tMX4WGO4Rr +++ mktemp ++ local LAST_ERR=/tmp/tmp.t4uCBmWSDl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tMX4WGO4Rr ++ cat /tmp/tmp.t4uCBmWSDl ++ rm /tmp/tmp.tMX4WGO4Rr /tmp/tmp.t4uCBmWSDl ++ return 0 + local pod=percona-server-mongodb-operator-7dbb56857b-mx62k ++ kubectl_bin get pod -n psmdb-operator percona-server-mongodb-operator-7dbb56857b-mx62k '-ojsonpath={.status.containerStatuses[0].restartCount}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VCgyA5bTm6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IcHQcaLl1Z ++ local exit_status=0 ++ local timeout=4 +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n psmdb-operator percona-server-mongodb-operator-7dbb56857b-mx62k '-ojsonpath={.status.containerStatuses[0].restartCount}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VCgyA5bTm6 ++ cat /tmp/tmp.IcHQcaLl1Z ++ rm /tmp/tmp.VCgyA5bTm6 /tmp/tmp.IcHQcaLl1Z ++ return 0 + local restart_count_after=5 + '[' percona-server-mongodb-operator-7dbb56857b-mx62k '!=' percona-server-mongodb-operator-7dbb56857b-mx62k ']' + '[' 0 -eq 5 ']' + '[' -n psmdb-operator ']' + kubectl_bin config set-context --current --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.0pnZlVF5aO ++ mktemp + local LAST_ERR=/tmp/tmp.X3jZFoVrWL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context --current --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0pnZlVF5aO Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3" modified. + cat /tmp/tmp.X3jZFoVrWL + rm /tmp/tmp.0pnZlVF5aO /tmp/tmp.X3jZFoVrWL + return 0 + wait_pod percona-server-mongodb-operator-7dbb56857b-mx62k + local pod=percona-server-mongodb-operator-7dbb56857b-mx62k + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dbb56857b-mx62k to be ready.OK + sleep 10 + kubectl_bin config set-context --current --namespace=operator-self-healing-chaos-22383 ++ mktemp + local LAST_OUT=/tmp/tmp.f8iWsPImfb ++ mktemp + local LAST_ERR=/tmp/tmp.SgsAY1gPFg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context --current --namespace=operator-self-healing-chaos-22383 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f8iWsPImfb Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster3" modified. 
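The pod-failure experiment above is built by rewriting e2e-tests/conf/chaos-pod-failure.yml with yq and applying it in the psmdb-operator namespace. The file's full contents are not shown in this log; the following is only a plausible reconstruction of the rendered manifest, where action, mode and duration are assumptions based on the "fail operator pod for 60s" step and the Chaos Mesh PodChaos API, while the name, namespace and pod selector come from the trace:

cat <<'EOF' | kubectl apply --namespace psmdb-operator -f -
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-operator-pod-failure
spec:
  action: pod-failure   # assumed: pause the operator container rather than delete the pod
  mode: one
  duration: "60s"       # assumed from the "fail operator pod for 60s" description
  selector:
    pods:
      psmdb-operator:
        - percona-server-mongodb-operator-7dbb56857b-mx62k
EOF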
+ cat /tmp/tmp.SgsAY1gPFg + rm /tmp/tmp.f8iWsPImfb /tmp/tmp.SgsAY1gPFg + return 0 + wait_for_delete pod/some-name-rs0-3 + local res=pod/some-name-rs0-3 + local wait_time=60 + set +o xtrace waiting for pod/some-name-rs0-3 to be deletedError from server (NotFound): pods "some-name-rs0-3" not found Error from server (NotFound): pods "some-name-rs0-3" not found Error from server (NotFound): pods "some-name-rs0-3" not found Error from server (NotFound): pods "some-name-rs0-3" not found + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y3auGRnD0u +++ mktemp ++ local LAST_ERR=/tmp/tmp.oNIsfRoH5k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y3auGRnD0u ++ cat /tmp/tmp.oNIsfRoH5k ++ rm /tmp/tmp.Y3auGRnD0u /tmp/tmp.oNIsfRoH5k ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NwLoTt0rhv +++ mktemp ++ local LAST_ERR=/tmp/tmp.rKTG57GKy0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NwLoTt0rhv ++ cat /tmp/tmp.rKTG57GKy0 ++ rm /tmp/tmp.NwLoTt0rhv /tmp/tmp.rKTG57GKy0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HN4P6TI5m2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GA0ikjTF2h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HN4P6TI5m2 ++ cat /tmp/tmp.GA0ikjTF2h ++ rm /tmp/tmp.HN4P6TI5m2 /tmp/tmp.GA0ikjTF2h ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rKqf7fQ9lC +++ mktemp ++ local LAST_ERR=/tmp/tmp.EXH3lgkIlN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rKqf7fQ9lC ++ cat /tmp/tmp.EXH3lgkIlN ++ rm /tmp/tmp.rKqf7fQ9lC /tmp/tmp.EXH3lgkIlN ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'emulate bad network for 60s' + set +o xtrace ----------------------------------------------------------------------------------- emulate bad network for 60s ----------------------------------------------------------------------------------- + network_loss ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZmumCPNGgo +++ mktemp ++ local LAST_ERR=/tmp/tmp.lipriMmWeb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZmumCPNGgo ++ cat /tmp/tmp.lipriMmWeb ++ rm /tmp/tmp.ZmumCPNGgo /tmp/tmp.lipriMmWeb ++ return 0 + local init_pod=percona-server-mongodb-operator-7dbb56857b-mx62k + kubectl apply --namespace psmdb-operator -f - + yq eval ' .metadata.name = "chaos-operator-network" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.psmdb-operator[0] = "percona-server-mongodb-operator-7dbb56857b-mx62k"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/chaos-network-loss.yml networkchaos.chaos-mesh.org/chaos-operator-network created + sleep 10 + desc 'check if operator works fine: scale up from 3 to 5' + set +o xtrace ----------------------------------------------------------------------------------- check if operator works fine: scale up from 3 to 5 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]' ++ mktemp + local LAST_OUT=/tmp/tmp.YmL5oQ6mhb ++ mktemp + local LAST_ERR=/tmp/tmp.V2cd1zPZ4F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YmL5oQ6mhb perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.V2cd1zPZ4F + rm /tmp/tmp.YmL5oQ6mhb /tmp/tmp.V2cd1zPZ4F + return 0 + sleep 10 + wait_for_running some-name-rs0 5 + local name=some-name-rs0 + let last_pod=4 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 4 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 4 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 4 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 4 ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 3 -eq 4 ]] + wait_pod some-name-rs0-3 + local pod=some-name-rs0-3 + set +o xtrace waiting for pod/some-name-rs0-3 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 4 -eq 4 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.TGS0MeglDy +++ mktemp ++ local LAST_ERR=/tmp/tmp.AJSbrrZ3nB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TGS0MeglDy ++ cat /tmp/tmp.AJSbrrZ3nB ++ rm /tmp/tmp.TGS0MeglDy /tmp/tmp.AJSbrrZ3nB ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-4 + local pod=some-name-rs0-4 + set +o xtrace waiting for pod/some-name-rs0-4 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a26kUft9UH +++ mktemp ++ local LAST_ERR=/tmp/tmp.rBOUNiRKSC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a26kUft9UH ++ cat /tmp/tmp.rBOUNiRKSC ++ rm /tmp/tmp.a26kUft9UH /tmp/tmp.rBOUNiRKSC ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2ND2rRmrKq +++ mktemp ++ local LAST_ERR=/tmp/tmp.PRFnsLNsrs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ND2rRmrKq ++ cat /tmp/tmp.PRFnsLNsrs ++ rm /tmp/tmp.2ND2rRmrKq /tmp/tmp.PRFnsLNsrs ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9SSYgGzJPi +++ mktemp ++ local LAST_ERR=/tmp/tmp.470qV3yJnw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9SSYgGzJPi ++ cat /tmp/tmp.470qV3yJnw ++ rm /tmp/tmp.9SSYgGzJPi /tmp/tmp.470qV3yJnw ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// + local chaos_mesh_ns=psmdb-operator + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n psmdb-operator ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace psmdb-operator release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get 
ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'NetworkChaos chaos-operator-network psmdb-operator' + local kind=NetworkChaos ++ echo 'NetworkChaos chaos-operator-network psmdb-operator' ++ awk '{print $2}' + local name=chaos-operator-network ++ echo 'NetworkChaos chaos-operator-network psmdb-operator' ++ awk '{print $3}' + local 
namespace=psmdb-operator + kubectl patch NetworkChaos chaos-operator-network -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' networkchaos.chaos-mesh.org/chaos-operator-network patched + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces networkchaos.chaos-mesh.org "chaos-operator-network" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' ++ awk '{print $1}' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' + local name=chaos-operator-pod-failure ++ awk '{print $3}' ++ echo 'PodChaos chaos-operator-pod-failure psmdb-operator' + local namespace=psmdb-operator + kubectl patch PodChaos chaos-operator-pod-failure -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-operator-pod-failure patched + read -r line ++ echo 'PodChaos chaos-operator-pod-kill psmdb-operator' ++ awk '{print $1}' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-operator-pod-kill psmdb-operator' + local name=chaos-operator-pod-kill ++ awk '{print $3}' ++ echo 'PodChaos chaos-operator-pod-kill psmdb-operator' + local namespace=psmdb-operator + kubectl patch PodChaos chaos-operator-pod-kill -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-operator-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-operator-pod-failure" deleted podchaos.chaos-mesh.org "chaos-operator-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodNetworkChaos percona-server-mongodb-operator-7dbb56857b-mx62k psmdb-operator' ++ awk '{print $1}' + local kind=PodNetworkChaos ++ echo 'PodNetworkChaos percona-server-mongodb-operator-7dbb56857b-mx62k psmdb-operator' ++ awk '{print 
$2}' + local name=percona-server-mongodb-operator-7dbb56857b-mx62k ++ echo 'PodNetworkChaos percona-server-mongodb-operator-7dbb56857b-mx62k psmdb-operator' ++ awk '{print $3}' + local namespace=psmdb-operator + kubectl patch PodNetworkChaos percona-server-mongodb-operator-7dbb56857b-mx62k -n psmdb-operator --type=merge -p '{"metadata":{"finalizers":[]}}' podnetworkchaos.chaos-mesh.org/percona-server-mongodb-operator-7dbb56857b-mx62k patched (no change) + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces podnetworkchaos.chaos-mesh.org "percona-server-mongodb-operator-7dbb56857b-mx62k" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org 
customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + destroy operator-self-healing-chaos-22383 + local namespace=operator-self-healing-chaos-22383 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.RnomvZNMHf +++ mktemp ++ local LAST_ERR=/tmp/tmp.ne7tSMMuLi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RnomvZNMHf ++ cat /tmp/tmp.ne7tSMMuLi No resources found in operator-self-healing-chaos-22383 namespace. 
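The destroy_chaos_mesh sequence above follows a fixed pattern: helm uninstall chaos-mesh, best-effort deletion of its mutating/validating webhook configurations (the "no name was specified" errors only mean the grep matched nothing), a loop over every chaos-mesh API resource that strips finalizers from any live objects before deleting them, and finally removal of the chaos-mesh.org CRDs, clusterroles and clusterrolebindings. A condensed sketch of that loop is shown below; the exact helper lives in the e2e-tests functions and is not printed in this log, so treat this as an approximation of what the trace shows rather than the verbatim script.

    # Approximate shape of the chaos-mesh teardown loop seen in the trace above.
    for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
        # drop finalizers from any live objects of this kind so deletion cannot hang
        kubectl get "$i" --all-namespaces --no-headers \
            -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
        | while read -r line; do
            kind=$(echo "$line" | awk '{print $1}')
            name=$(echo "$line" | awk '{print $2}')
            namespace=$(echo "$line" | awk '{print $3}')
            kubectl patch "$kind" "$name" -n "$namespace" --type=merge -p '{"metadata":{"finalizers":[]}}'
        done
        timeout 30 kubectl delete "$i" --all --all-namespaces || :
    done
    # then the CRDs and cluster-scoped RBAC objects themselves
    timeout 30 kubectl delete crd $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
    timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep chaos-mesh | awk '{print $1}') || :
    timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}') || :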
++ rm /tmp/tmp.RnomvZNMHf /tmp/tmp.ne7tSMMuLi ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.FwBje8sgAe ++ mktemp + local LAST_ERR=/tmp/tmp.DmEJIEIFeS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FwBje8sgAe customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.DmEJIEIFeS + rm /tmp/tmp.FwBje8sgAe /tmp/tmp.DmEJIEIFeS + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ybeSM3bIKH ++ mktemp + local LAST_ERR=/tmp/tmp.7yYZ5rmkX4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ybeSM3bIKH + cat /tmp/tmp.7yYZ5rmkX4 + rm /tmp/tmp.ybeSM3bIKH /tmp/tmp.7yYZ5rmkX4 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dRB9yc0ePt ++ mktemp + local LAST_ERR=/tmp/tmp.u5ugG4VUbh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dRB9yc0ePt 
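Almost every kubectl/helm invocation in this log goes through a kubectl_bin wrapper, which is why each step is surrounded by mktemp, seq 0 2, cat and rm lines: stdout and stderr are captured to temp files and the command is retried up to three times with an increasing delay (the sleep 0 / sleep 4 / sleep 8 lines visible in the cert-manager cleanup further down). The wrapper itself is never printed here; reconstructed from the trace pattern it behaves roughly like this (a sketch, not the verbatim function from e2e-tests):

    # Reconstruction of the retry wrapper implied by the trace; details may differ.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -ne 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))   # 0s, 4s, 8s between attempts
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

This is also why the cert-manager deletion below prints the same block of NotFound errors three times: the delete exits non-zero each attempt, so the wrapper re-runs it after each backoff before giving up.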
+ cat /tmp/tmp.u5ugG4VUbh + rm /tmp/tmp.dRB9yc0ePt /tmp/tmp.u5ugG4VUbh + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7qVz6HtACQ ++ mktemp + local LAST_ERR=/tmp/tmp.qEbUAKa2Bi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7qVz6HtACQ + cat /tmp/tmp.qEbUAKa2Bi + rm /tmp/tmp.7qVz6HtACQ /tmp/tmp.qEbUAKa2Bi + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.UzxgsQD9n0 ++ mktemp + local LAST_ERR=/tmp/tmp.zbc5PkMA9O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UzxgsQD9n0 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.zbc5PkMA9O + rm /tmp/tmp.UzxgsQD9n0 /tmp/tmp.zbc5PkMA9O + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Q6hVQ1q4rU ++ mktemp + local LAST_ERR=/tmp/tmp.HI8xnKBbrf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Q6hVQ1q4rU + cat /tmp/tmp.HI8xnKBbrf Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Q6hVQ1q4rU + cat /tmp/tmp.HI8xnKBbrf Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat 
/tmp/tmp.Q6hVQ1q4rU
+ cat /tmp/tmp.HI8xnKBbrf
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
+ cat /tmp/tmp.Q6hVQ1q4rU
+ cat /tmp/tmp.HI8xnKBbrf
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ rm /tmp/tmp.Q6hVQ1q4rU /tmp/tmp.HI8xnKBbrf
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ rm -rf /tmp/tmp.wwNFOPQKsw
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
+ kubectl_bin delete --grace-period=0 --force=true namespace operator-self-healing-chaos-22383
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.193Osgvb0d
+ local LAST_OUT=/tmp/tmp.T7i2deTKRz
++ mktemp
+ local LAST_ERR=/tmp/tmp.cKgEqcQEYP
+ local exit_status=0
+ local timeout=4
++ mktemp
+ local LAST_ERR=/tmp/tmp.Pi0FVn1TzV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace operator-self-healing-chaos-22383
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.193Osgvb0d
namespace "psmdb-operator" force deleted
+ cat /tmp/tmp.cKgEqcQEYP
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+ rm /tmp/tmp.193Osgvb0d /tmp/tmp.cKgEqcQEYP
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.T7i2deTKRz
namespace "operator-self-healing-chaos-22383" force deleted
+ cat /tmp/tmp.Pi0FVn1TzV
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+ rm /tmp/tmp.T7i2deTKRz /tmp/tmp.Pi0FVn1TzV
+ return 0