Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/logs/self-healing-chaos.log WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 + cluster=some-name-rs0 + main + create_infra self-healing-chaos-32328 + local ns=self-healing-chaos-32328 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.j2IQszQSvW ++ mktemp + local LAST_ERR=/tmp/tmp.0P3SUuWDJv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j2IQszQSvW customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.0P3SUuWDJv + rm /tmp/tmp.j2IQszQSvW /tmp/tmp.0P3SUuWDJv + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.kDeqjnfKfX ++ mktemp + local LAST_ERR=/tmp/tmp.DCada5bX1q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kDeqjnfKfX + cat /tmp/tmp.DCada5bX1q + rm /tmp/tmp.kDeqjnfKfX /tmp/tmp.DCada5bX1q + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have 
a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.M0kH4FMHLs ++ mktemp + local LAST_ERR=/tmp/tmp.Cq5RXA4lOU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M0kH4FMHLs + cat /tmp/tmp.Cq5RXA4lOU + rm /tmp/tmp.M0kH4FMHLs /tmp/tmp.Cq5RXA4lOU + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JrXnTDOb2D ++ mktemp + local LAST_ERR=/tmp/tmp.4vZAzglyoQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JrXnTDOb2D + cat /tmp/tmp.4vZAzglyoQ + rm /tmp/tmp.JrXnTDOb2D /tmp/tmp.4vZAzglyoQ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.4ThNhu4unz ++ mktemp + local LAST_ERR=/tmp/tmp.FEssmpD1EL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4ThNhu4unz clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.FEssmpD1EL + rm /tmp/tmp.4ThNhu4unz /tmp/tmp.FEssmpD1EL + return 0 + check_crd_for_deletion PR-1585-fdd2d1e6 + local git_tag=PR-1585-fdd2d1e6 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1585-fdd2d1e6/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gz5sgPgC2x +++ mktemp ++ local LAST_ERR=/tmp/tmp.jcfY6DRNqr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.gz5sgPgC2x ++ cat /tmp/tmp.jcfY6DRNqr Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 
0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.gz5sgPgC2x ++ cat /tmp/tmp.jcfY6DRNqr Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.gz5sgPgC2x ++ cat /tmp/tmp.jcfY6DRNqr Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.gz5sgPgC2x ++ cat /tmp/tmp.jcfY6DRNqr Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.gz5sgPgC2x /tmp/tmp.jcfY6DRNqr ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns=self-healing-chaos-30777 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-30777 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-30777 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh 
| awk '\''{print $1}'\'')' + read -r line + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30777' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30777' ++ awk '{print $2}' + local name=chaos-cluster-pod-kill ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-30777' ++ awk '{print $3}' + local namespace=self-healing-chaos-30777 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-30777 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o 
custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org 
schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.SE9gYZtpd1 ++ mktemp + local LAST_OUT=/tmp/tmp.pL1btMte1E ++ mktemp + local LAST_ERR=/tmp/tmp.4U6hOqRjMa + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.rPXeVF1tCq + local exit_status=0 + local timeout=4 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl 
delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SE9gYZtpd1 + cat /tmp/tmp.4U6hOqRjMa + rm /tmp/tmp.SE9gYZtpd1 /tmp/tmp.4U6hOqRjMa + return 0 namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "self-healing-chaos-30777" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pL1btMte1E namespace "psmdb-operator" deleted + cat /tmp/tmp.rPXeVF1tCq + rm /tmp/tmp.pL1btMte1E /tmp/tmp.rPXeVF1tCq + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.7gxdqoy7UQ ++ mktemp + local LAST_ERR=/tmp/tmp.jEyvEVani8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7gxdqoy7UQ + cat /tmp/tmp.jEyvEVani8 + rm /tmp/tmp.7gxdqoy7UQ /tmp/tmp.jEyvEVani8 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.QBhSZDKAQz ++ mktemp + local LAST_ERR=/tmp/tmp.zYGVXFNui8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QBhSZDKAQz namespace/psmdb-operator created + cat /tmp/tmp.zYGVXFNui8 + rm /tmp/tmp.QBhSZDKAQz /tmp/tmp.zYGVXFNui8 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.AO9nmSOmwo +++ mktemp ++ local LAST_ERR=/tmp/tmp.pejrWD0WYd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AO9nmSOmwo ++ cat /tmp/tmp.pejrWD0WYd ++ rm /tmp/tmp.AO9nmSOmwo /tmp/tmp.pejrWD0WYd ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.vBWUySMNgb ++ mktemp + local LAST_ERR=/tmp/tmp.oH8yaEiBc8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vBWUySMNgb Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9" modified. 
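The mktemp / LAST_OUT / LAST_ERR / "seq 0 2" pattern that repeats throughout this trace is produced by the suite's kubectl_bin retry wrapper. A minimal sketch, reconstructed purely from the statements visible in the trace (the actual helper in the e2e-tests functions library may differ in details such as the exact failure test):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            # on failure, echo what happened and back off;
            # the trace above shows sleep 0, sleep 4, sleep 8 between attempts
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((timeout * i))
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

Every "+ cat /tmp/tmp.*" pair in the log is this wrapper echoing the captured stdout and stderr of the wrapped kubectl call before removing its temp files, which is why even successful commands print two cat lines.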
+ cat /tmp/tmp.oH8yaEiBc8 + rm /tmp/tmp.vBWUySMNgb /tmp/tmp.oH8yaEiBc8 + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WwqMtrovXt ++ mktemp + local LAST_ERR=/tmp/tmp.ewJ6gqT8ix + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WwqMtrovXt customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.ewJ6gqT8ix + rm /tmp/tmp.WwqMtrovXt /tmp/tmp.ewJ6gqT8ix + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-rbac.yaml + local LAST_OUT=/tmp/tmp.to8DI5JIkI ++ mktemp + local LAST_ERR=/tmp/tmp.57GkraI5YJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.to8DI5JIkI clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.57GkraI5YJ + rm /tmp/tmp.to8DI5JIkI /tmp/tmp.57GkraI5YJ + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1585-fdd2d1e6") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.HAdjKEtRos ++ mktemp + local LAST_ERR=/tmp/tmp.jh8UaipU91 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HAdjKEtRos deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.jh8UaipU91 + rm /tmp/tmp.HAdjKEtRos /tmp/tmp.jh8UaipU91 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.stoYFNaIfT +++ mktemp ++ local LAST_ERR=/tmp/tmp.pHU6WgkSom ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.stoYFNaIfT ++ cat /tmp/tmp.pHU6WgkSom ++ rm /tmp/tmp.stoYFNaIfT /tmp/tmp.pHU6WgkSom ++ return 0 + wait_pod percona-server-mongodb-operator-7d54595896-srvpv + local pod=percona-server-mongodb-operator-7d54595896-srvpv + set +o xtrace waiting for pod/percona-server-mongodb-operator-7d54595896-srvpv to be ready.OK + create_namespace self-healing-chaos-32328 + local namespace=self-healing-chaos-32328 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces self-healing-chaos-32328' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces self-healing-chaos-32328 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace self-healing-chaos-32328 --ignore-not-found + xargs kubectl delete ns ++ mktemp + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.kXiympInv8 + local LAST_OUT=/tmp/tmp.41B2x3O0AC ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.3rGvhKbIcO + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.1qjil7D9W4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace self-healing-chaos-32328 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.41B2x3O0AC + cat /tmp/tmp.1qjil7D9W4 + rm /tmp/tmp.41B2x3O0AC /tmp/tmp.1qjil7D9W4 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kXiympInv8 + cat /tmp/tmp.3rGvhKbIcO + rm /tmp/tmp.kXiympInv8 /tmp/tmp.3rGvhKbIcO + return 0 + kubectl_bin wait --for=delete namespace self-healing-chaos-32328 ++ mktemp + local LAST_OUT=/tmp/tmp.SpPIhBkeyI ++ mktemp + local LAST_ERR=/tmp/tmp.VDbspLXKby + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace self-healing-chaos-32328 namespace "gmp-public" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SpPIhBkeyI + cat /tmp/tmp.VDbspLXKby + rm /tmp/tmp.SpPIhBkeyI /tmp/tmp.VDbspLXKby + return 0 + desc 'create namespace self-healing-chaos-32328' + set +o xtrace ----------------------------------------------------------------------------------- create namespace self-healing-chaos-32328 ----------------------------------------------------------------------------------- + kubectl_bin create namespace self-healing-chaos-32328 ++ mktemp + local LAST_OUT=/tmp/tmp.g774S1dzup ++ mktemp + local LAST_ERR=/tmp/tmp.VOBvL6YGXd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace self-healing-chaos-32328 namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g774S1dzup namespace/self-healing-chaos-32328 created + cat /tmp/tmp.VOBvL6YGXd + rm /tmp/tmp.g774S1dzup /tmp/tmp.VOBvL6YGXd + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.McELeYG8yO +++ mktemp ++ local LAST_ERR=/tmp/tmp.VynPKnxcmG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.McELeYG8yO ++ cat /tmp/tmp.VynPKnxcmG ++ rm /tmp/tmp.McELeYG8yO /tmp/tmp.VynPKnxcmG ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9 --namespace=self-healing-chaos-32328 ++ mktemp + local LAST_OUT=/tmp/tmp.FcXYvNX7Lg ++ mktemp + local LAST_ERR=/tmp/tmp.FqjJuFOEpA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9 --namespace=self-healing-chaos-32328 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FcXYvNX7Lg Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1585-fdd2d1e6-13-cluster9" modified. + cat /tmp/tmp.FqjJuFOEpA + rm /tmp/tmp.FcXYvNX7Lg /tmp/tmp.FqjJuFOEpA + return 0 + deploy_chaos_mesh self-healing-chaos-32328 + local chaos_mesh_ns=self-healing-chaos-32328 + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" already exists with the same configuration, skipping + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=self-healing-chaos-32328 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 NAME: chaos-mesh LAST DEPLOYED: Mon Jul 22 17:04:02 2024 NAMESPACE: self-healing-chaos-32328 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. 
Make sure chaos-mesh components are running kubectl get pods --namespace self-healing-chaos-32328 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + setup_cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Cx9A0XaB0l ++ mktemp + local LAST_ERR=/tmp/tmp.gXKLjT0lmn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cx9A0XaB0l secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.gXKLjT0lmn + rm /tmp/tmp.Cx9A0XaB0l /tmp/tmp.gXKLjT0lmn + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1585-fdd2d1e6"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.h6GpVTxCTY ++ mktemp + local LAST_ERR=/tmp/tmp.dyhK7Pwgfs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h6GpVTxCTY perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.dyhK7Pwgfs + rm /tmp/tmp.h6GpVTxCTY /tmp/tmp.dyhK7Pwgfs + return 0 + desc 'enable backups' + set +o xtrace ----------------------------------------------------------------------------------- enable backups ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.O7ALVzrwBp ++ mktemp + local LAST_ERR=/tmp/tmp.dOGHrJepc8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 
1 ']' + break + cat /tmp/tmp.O7ALVzrwBp perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.dOGHrJepc8 + rm /tmp/tmp.O7ALVzrwBp /tmp/tmp.dOGHrJepc8 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xa0PGuDRUf +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xia4zgqdyI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xa0PGuDRUf ++ cat /tmp/tmp.Xia4zgqdyI ++ rm /tmp/tmp.Xa0PGuDRUf /tmp/tmp.Xia4zgqdyI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.................OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eyfp2nkP3d +++ mktemp ++ local LAST_ERR=/tmp/tmp.jz5XYDE22P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eyfp2nkP3d ++ cat /tmp/tmp.jz5XYDE22P ++ rm /tmp/tmp.eyfp2nkP3d /tmp/tmp.jz5XYDE22P ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............. 
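Every run_mongo call in the remainder of the test reduces to the same pattern: resolve the psmdb-client pod, then pipe a command snippet into the mongo shell inside it. A minimal sketch, reconstructed from the exec commands visible below (the real helper also handles extra driver/suffix/flag arguments and the *cfg* replica-set case; the pod selector, URI shape, and connection flags here are copied verbatim from the trace):

run_mongo() {
    local command="$1"            # e.g. 'db.createUser({...})'
    local uri="$2"                # e.g. myApp:myPass@some-name-rs0.self-healing-chaos-32328
    local driver="${3:-mongodb+srv}"
    local suffix=.svc.cluster.local
    local client_container
    # find the client deployment's pod, as the trace does with kubectl_bin
    client_container=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client_container" -- bash -c \
        "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
}

The compare_mongo_cmd checks that follow feed the find() output through egrep -v (dropping shell and network noise) and sed (masking ObjectIds and pod ordinals) before diffing it against e2e-tests/self-healing-chaos/compare/find.json; since the suite runs under set -e, a non-empty diff fails the test.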
+ desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-32328 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-32328 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UUR3yETejr +++ mktemp ++ local LAST_ERR=/tmp/tmp.1xEnlMjmvS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UUR3yETejr ++ cat /tmp/tmp.1xEnlMjmvS ++ rm /tmp/tmp.UUR3yETejr /tmp/tmp.1xEnlMjmvS ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GVxhx4Kxyt ++ mktemp + local LAST_ERR=/tmp/tmp.Y6Cre0apYP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GVxhx4Kxyt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("29ece9e2-3143-410e-93e7-ab7dff6e46e6") } Percona Server for MongoDB server version: v7.0.5-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Y6Cre0apYP + rm /tmp/tmp.GVxhx4Kxyt /tmp/tmp.Y6Cre0apYP + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fyvxHrK9on +++ mktemp ++ local LAST_ERR=/tmp/tmp.4JEZxWNA95 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fyvxHrK9on ++ cat /tmp/tmp.4JEZxWNA95 ++ rm /tmp/tmp.fyvxHrK9on /tmp/tmp.4JEZxWNA95 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.huqP2amE6a ++ mktemp + local LAST_ERR=/tmp/tmp.Rv2UKHwcKY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.huqP2amE6a Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c02aec04-4dc6-496b-83dc-775fb03b5fbd") } Percona Server for MongoDB server version: v7.0.5-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Rv2UKHwcKY + rm /tmp/tmp.huqP2amE6a /tmp/tmp.Rv2UKHwcKY + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6VoOIWNSQW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q2B6ME3WEU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6VoOIWNSQW ++ cat /tmp/tmp.Q2B6ME3WEU ++ rm /tmp/tmp.6VoOIWNSQW /tmp/tmp.Q2B6ME3WEU ++ return 0 + 
local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CWSN2zvFFA ++ mktemp + local LAST_ERR=/tmp/tmp.Yob2wJMZCd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CWSN2zvFFA + cat /tmp/tmp.Yob2wJMZCd + rm /tmp/tmp.CWSN2zvFFA /tmp/tmp.Yob2wJMZCd + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 mongodb '' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0yinTZfoyM +++ mktemp ++ local LAST_ERR=/tmp/tmp.W7OUB9WQL7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0yinTZfoyM ++ cat /tmp/tmp.W7OUB9WQL7 ++ rm /tmp/tmp.0yinTZfoyM /tmp/tmp.W7OUB9WQL7 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Elcm4IOgKB ++ mktemp + local LAST_ERR=/tmp/tmp.l7k7Q0xJQy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Elcm4IOgKB + cat /tmp/tmp.l7k7Q0xJQy + rm /tmp/tmp.Elcm4IOgKB /tmp/tmp.l7k7Q0xJQy + return 0 + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.93Pz6AS2zX +++ mktemp ++ local LAST_ERR=/tmp/tmp.AVcJJ3ktA2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.93Pz6AS2zX ++ cat /tmp/tmp.AVcJJ3ktA2 ++ rm /tmp/tmp.93Pz6AS2zX /tmp/tmp.AVcJJ3ktA2 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PRF5CwVZZR ++ mktemp + local LAST_ERR=/tmp/tmp.yrjrqI5Q0J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PRF5CwVZZR + cat /tmp/tmp.yrjrqI5Q0J + rm /tmp/tmp.PRF5CwVZZR /tmp/tmp.yrjrqI5Q0J + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + desc 'recreate cluster' + set +o xtrace ----------------------------------------------------------------------------------- recreate cluster ----------------------------------------------------------------------------------- + recreate + desc 'delete PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.m0uwy3YOnm ++ mktemp + local LAST_ERR=/tmp/tmp.0gJJHnOsrx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m0uwy3YOnm perconaservermongodb.psmdb.percona.com "some-name" deleted + cat /tmp/tmp.0gJJHnOsrx + rm /tmp/tmp.m0uwy3YOnm /tmp/tmp.0gJJHnOsrx + return 0 + wait_for_delete pod/some-name-rs0-2 + local res=pod/some-name-rs0-2 + local wait_time=60 + set +o xtrace pod/some-name-rs0-2 - .......Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found + wait_for_delete pod/some-name-rs0-1 + local res=pod/some-name-rs0-1 + local wait_time=60 + set +o xtrace pod/some-name-rs0-1 - Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found + wait_for_delete pod/some-name-rs0-0 + local res=pod/some-name-rs0-0 + local wait_time=60 + set +o xtrace pod/some-name-rs0-0 - Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found + desc 'create PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1585-fdd2d1e6"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.c7k0Em9PMZ ++ mktemp + local LAST_ERR=/tmp/tmp.WBS30W870X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c7k0Em9PMZ perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.WBS30W870X + rm /tmp/tmp.c7k0Em9PMZ /tmp/tmp.WBS30W870X + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready...........OK + for i in 
'$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4ilZMTQYvS +++ mktemp ++ local LAST_ERR=/tmp/tmp.1aPhaRoI4m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4ilZMTQYvS ++ cat /tmp/tmp.1aPhaRoI4m ++ rm /tmp/tmp.4ilZMTQYvS /tmp/tmp.1aPhaRoI4m ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uos11wGbgr +++ mktemp ++ local LAST_ERR=/tmp/tmp.mKq3SGI5BQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uos11wGbgr ++ cat /tmp/tmp.mKq3SGI5BQ ++ rm /tmp/tmp.Uos11wGbgr /tmp/tmp.mKq3SGI5BQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HywofxNMvD +++ mktemp ++ local LAST_ERR=/tmp/tmp.8u7cyRN1pz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HywofxNMvD ++ cat /tmp/tmp.8u7cyRN1pz ++ rm /tmp/tmp.HywofxNMvD /tmp/tmp.8u7cyRN1pz ++ return 0 + [[ ready == \r\e\a\d\y ]] + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zFnpZE1t15 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AXGYcmHnSo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
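
# The compare_mongo_cmd/run_mongo pair above implements a golden-file check:
# run a query inside the psmdb-client pod, strip run-to-run noise, then diff
# the result against a stored expectation. A condensed sketch of that
# pipeline; $uri, $test_dir and $tmp_dir stand in for the node URI, the
# compare/ directory and this run's scratch directory (names assumed):
run_mongo 'use myApp\n db.test.find()' "$uri" mongodb '' \
    | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
    | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
    > "$tmp_dir/find"
# sed drops volatile ObjectIds and masks the per-run namespace suffix;
# a non-empty diff fails the test because the script runs under set -e.
diff "$test_dir/compare/find.json" "$tmp_dir/find"
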
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zFnpZE1t15 ++ cat /tmp/tmp.AXGYcmHnSo ++ rm /tmp/tmp.zFnpZE1t15 /tmp/tmp.AXGYcmHnSo ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.M9Md9kEWpM ++ mktemp + local LAST_ERR=/tmp/tmp.jluAhClT6v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M9Md9kEWpM + cat /tmp/tmp.jluAhClT6v + rm /tmp/tmp.M9Md9kEWpM /tmp/tmp.jluAhClT6v + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TcmDr8G49X +++ mktemp ++ local LAST_ERR=/tmp/tmp.CUfP9knBwY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TcmDr8G49X ++ cat /tmp/tmp.CUfP9knBwY ++ rm /tmp/tmp.TcmDr8G49X /tmp/tmp.CUfP9knBwY ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Z24fOraYJ1 ++ mktemp + local LAST_ERR=/tmp/tmp.uAigebWAJI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z24fOraYJ1 + cat /tmp/tmp.uAigebWAJI + rm /tmp/tmp.Z24fOraYJ1 /tmp/tmp.uAigebWAJI + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5DREzRs0lV +++ mktemp ++ local LAST_ERR=/tmp/tmp.sjCObho2SH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5DREzRs0lV ++ cat /tmp/tmp.sjCObho2SH ++ rm /tmp/tmp.5DREzRs0lV /tmp/tmp.sjCObho2SH ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jkoIfdWzBc ++ mktemp + local LAST_ERR=/tmp/tmp.UBQmU3NZHW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jkoIfdWzBc + cat /tmp/tmp.UBQmU3NZHW + rm /tmp/tmp.jkoIfdWzBc /tmp/tmp.UBQmU3NZHW + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + desc 'kill node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- kill node-0 pod ----------------------------------------------------------------------------------- + kill_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ tr -d '"' ++ jq .metadata.resourceVersion ++ kubectl get pod some-name-rs0-0 -ojson + local old_resourceVersion=52346 + yq eval ' .metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-32328[0] = "some-name-rs0-0"' 
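
# kill_pod above first records the target Pod's resourceVersion, then
# templates the experiment by rewriting a shared manifest with yq: give it a
# unique name, drop the placeholder namespace key, and pin the real
# namespace/Pod. The same three yq edits recur for every chaos type in this
# test ($conf_dir stands in for the e2e-tests/conf directory):
yq eval '
    .metadata.name = "chaos-cluster-pod-kill" |
    del(.spec.selector.pods.test-namespace) |
    .spec.selector.pods.self-healing-chaos-32328[0] = "some-name-rs0-0"
' "$conf_dir/chaos-pod-kill.yml" | kubectl apply -f -
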
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/chaos-pod-kill.yml + kubectl apply -f - podchaos.chaos-mesh.org/chaos-cluster-pod-kill created + sleep 5 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BR9Wh6TxeQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.z2sEmBAgbE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BR9Wh6TxeQ ++ cat /tmp/tmp.z2sEmBAgbE ++ rm /tmp/tmp.BR9Wh6TxeQ /tmp/tmp.z2sEmBAgbE ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7ba9eBFOL3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RTrJ5YqoL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7ba9eBFOL3 ++ cat /tmp/tmp.6RTrJ5YqoL ++ rm /tmp/tmp.7ba9eBFOL3 /tmp/tmp.6RTrJ5YqoL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y0t2Dtk2HK +++ mktemp ++ local LAST_ERR=/tmp/tmp.svPGI77cLJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y0t2Dtk2HK ++ cat /tmp/tmp.svPGI77cLJ ++ rm /tmp/tmp.y0t2Dtk2HK /tmp/tmp.svPGI77cLJ ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_pod_restarted some-name-rs0-0 52346 + local pod=some-name-rs0-0 + local old_resourceVersion=52346 ++ tr -d '"' ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion + local new_resourceVersion=53623 + [[ 52346 == \5\3\6\2\3 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local command=find + local 
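
# check_pod_restarted above is how the test proves the chaos experiment
# actually hit the Pod: metadata.resourceVersion must differ before and
# after, since a recreated Pod object gets a new value. Sketch of that
# check; the failure branch is an assumption, the log only shows the
# passing comparison:
check_pod_restarted() {
    local pod=$1 old_rv=$2 new_rv
    new_rv=$(kubectl get pod "$pod" -ojson | jq .metadata.resourceVersion | tr -d '"')
    if [[ $old_rv == "$new_rv" ]]; then
        echo "pod/$pod was not restarted (resourceVersion still $old_rv)" >&2
        return 1
    fi
}
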
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local postfix= + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KbLrCdrHgO +++ mktemp ++ local LAST_ERR=/tmp/tmp.nUoucbdCod ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KbLrCdrHgO ++ cat /tmp/tmp.nUoucbdCod ++ rm /tmp/tmp.KbLrCdrHgO /tmp/tmp.nUoucbdCod ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.pImhaUOTq9 ++ mktemp + local LAST_ERR=/tmp/tmp.C2J5AfnwcM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pImhaUOTq9 + cat /tmp/tmp.C2J5AfnwcM + rm /tmp/tmp.pImhaUOTq9 /tmp/tmp.C2J5AfnwcM + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.NNt28idPHr/find + desc 'fail node-0 pod for 60s' + set +o xtrace ----------------------------------------------------------------------------------- fail node-0 pod for 60s ----------------------------------------------------------------------------------- + failure_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=53623 + kubectl apply -f - + yq eval ' .metadata.name = "chaos-cluster-pod-failure" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-32328[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/chaos-pod-failure.yml podchaos.chaos-mesh.org/chaos-cluster-pod-failure created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local 'command=use myApp\n 
db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lunkm9PeGq +++ mktemp ++ local LAST_ERR=/tmp/tmp.D5VNa6WLc5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lunkm9PeGq ++ cat /tmp/tmp.D5VNa6WLc5 ++ rm /tmp/tmp.lunkm9PeGq /tmp/tmp.D5VNa6WLc5 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CNRI8GkNpc ++ mktemp + local LAST_ERR=/tmp/tmp.LYdelfSlDt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CNRI8GkNpc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fafd1bcb-47e5-4d90-ade0-735b2805f512") } Percona Server for MongoDB server version: v7.0.5-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.LYdelfSlDt + rm /tmp/tmp.CNRI8GkNpc /tmp/tmp.LYdelfSlDt + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dMx9seVbPf +++ mktemp ++ local LAST_ERR=/tmp/tmp.SD46Q5qEbK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dMx9seVbPf ++ cat /tmp/tmp.SD46Q5qEbK ++ rm /tmp/tmp.dMx9seVbPf /tmp/tmp.SD46Q5qEbK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local 
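
# wait_pod above prints one dot per poll until the Pod reports Ready. The
# helper's body is not shown in this log; a plausible sketch that produces
# the same "waiting for pod/... to be ready....OK" output (the Ready
# condition query and the 1s interval are assumptions):
wait_pod() {
    local pod=$1
    echo -n "waiting for pod/$pod to be ready"
    until kubectl get pod "$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null \
            | grep -q True; do
        echo -n .
        sleep 1
    done
    echo OK
}
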
pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MRh2QzFbcE +++ mktemp ++ local LAST_ERR=/tmp/tmp.T7W846DYrD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MRh2QzFbcE ++ cat /tmp/tmp.T7W846DYrD ++ rm /tmp/tmp.MRh2QzFbcE /tmp/tmp.T7W846DYrD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....................... + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qd9ikhjkNn +++ mktemp ++ local LAST_ERR=/tmp/tmp.50ymhGPdZN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qd9ikhjkNn ++ cat /tmp/tmp.50ymhGPdZN ++ rm /tmp/tmp.Qd9ikhjkNn /tmp/tmp.50ymhGPdZN ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_pod_restarted some-name-rs0-0 53623 + local pod=some-name-rs0-0 + local old_resourceVersion=53623 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local new_resourceVersion=54720 + [[ 53623 == \5\4\7\2\0 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.bjvq8jBnev +++ mktemp ++ local LAST_ERR=/tmp/tmp.FlNtjTu1PK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bjvq8jBnev ++ cat /tmp/tmp.FlNtjTu1PK ++ rm /tmp/tmp.bjvq8jBnev /tmp/tmp.FlNtjTu1PK ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 == 
*cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rIgyZjjUdz ++ mktemp + local LAST_ERR=/tmp/tmp.wNqBYAShWh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rIgyZjjUdz + cat /tmp/tmp.wNqBYAShWh + rm /tmp/tmp.rIgyZjjUdz /tmp/tmp.wNqBYAShWh + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find-2nd.json /tmp/tmp.NNt28idPHr/find-2nd + desc 'emulate bad network node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- emulate bad network node-0 pod ----------------------------------------------------------------------------------- + network_loss some-name-rs0-0 + local pod=some-name-rs0-0 + yq eval ' .metadata.name = "chaos-cluster-network-loss" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-32328[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/conf/chaos-network-loss.yml + kubectl apply -f - networkchaos.chaos-mesh.org/chaos-cluster-network-loss created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-32328 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G2N9o2wtAh +++ mktemp ++ local LAST_ERR=/tmp/tmp.7iOWzRHDOx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G2N9o2wtAh ++ cat /tmp/tmp.7iOWzRHDOx ++ rm /tmp/tmp.G2N9o2wtAh /tmp/tmp.7iOWzRHDOx ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BZLNJaWI3T ++ mktemp + local LAST_ERR=/tmp/tmp.UrF0mMKfyO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + 
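
# network_loss above applies a NetworkChaos experiment the same way the
# pod-kill and pod-failure steps did. The base chaos-network-loss.yml is not
# shown in this log, so the loss percentage and duration below are
# placeholders that only illustrate the shape of the applied manifest (the
# test itself sleeps 60s before re-checking the cluster):
kubectl apply -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-cluster-network-loss
spec:
  action: loss
  mode: one
  loss:
    loss: "100"        # placeholder percentage
  duration: 60s        # placeholder, aligned with the sleep 60 that follows
  selector:
    pods:
      self-healing-chaos-32328:
        - some-name-rs0-0
EOF
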
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BZLNJaWI3T Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e84ead86-1bb2-4412-b689-ed0f4bc3359d") } Percona Server for MongoDB server version: v7.0.5-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.UrF0mMKfyO + rm /tmp/tmp.BZLNJaWI3T /tmp/tmp.UrF0mMKfyO + return 0 + sleep 60 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C4yZpTmSpX +++ mktemp ++ local LAST_ERR=/tmp/tmp.rq3G0SA60B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C4yZpTmSpX ++ cat /tmp/tmp.rq3G0SA60B ++ rm /tmp/tmp.C4yZpTmSpX /tmp/tmp.rq3G0SA60B ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H4oF4Z3jjU +++ mktemp ++ local LAST_ERR=/tmp/tmp.tbd2UswisP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H4oF4Z3jjU ++ cat /tmp/tmp.tbd2UswisP ++ rm /tmp/tmp.H4oF4Z3jjU /tmp/tmp.tbd2UswisP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v4jbpNP22H +++ mktemp ++ local LAST_ERR=/tmp/tmp.0tgy4oXMXs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v4jbpNP22H ++ cat /tmp/tmp.0tgy4oXMXs ++ rm /tmp/tmp.v4jbpNP22H /tmp/tmp.0tgy4oXMXs ++ return 0 + [[ ready == \r\e\a\d\y ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace 
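
# wait_cluster_consistency above polls the custom resource until
# .status.state reports "ready", with wait_time=32 attempts and a 7s pause
# between polls as seen in the trace. A compressed sketch (the echoed
# "readyness" spelling is kept from the log; the timeout branch is an
# assumption):
wait_cluster_consistency() {
    local cluster=$1 retry=0
    echo -n 'waiting for cluster readyness'
    sleep 7
    until [[ $(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}') == ready ]]; do
        if (( ++retry > 32 )); then echo ' timeout'; return 1; fi
        echo -n .
        sleep 7
    done
    echo
}
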
----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GtnjeRyTmo +++ mktemp ++ local LAST_ERR=/tmp/tmp.uKZGUDzTsu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GtnjeRyTmo ++ cat /tmp/tmp.uKZGUDzTsu ++ rm /tmp/tmp.GtnjeRyTmo /tmp/tmp.uKZGUDzTsu ++ return 0 + local client_container=psmdb-client-6c585f8dbd-b6cfq + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ppqJpQLr1q ++ mktemp + local LAST_ERR=/tmp/tmp.NGgr5WfP0F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-b6cfq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-32328.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ppqJpQLr1q + cat /tmp/tmp.NGgr5WfP0F + rm /tmp/tmp.ppqJpQLr1q /tmp/tmp.NGgr5WfP0F + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/e2e-tests/self-healing-chaos/compare/find-3rd.json /tmp/tmp.NNt28idPHr/find-3rd + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns=self-healing-chaos-32328 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-32328 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-32328 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get 
ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-32328' ++ 
awk '{print $1}' + local kind=NetworkChaos ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-32328' ++ awk '{print $2}' + local name=chaos-cluster-network-loss ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-32328' ++ awk '{print $3}' + local namespace=self-healing-chaos-32328 + kubectl patch NetworkChaos chaos-cluster-network-loss -n self-healing-chaos-32328 --type=merge -p '{"metadata":{"finalizers":[]}}' networkchaos.chaos-mesh.org/chaos-cluster-network-loss patched + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces networkchaos.chaos-mesh.org "chaos-cluster-network-loss" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-32328' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-32328' ++ awk '{print $2}' + local name=chaos-cluster-pod-failure ++ awk '{print $3}' ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-32328' + local namespace=self-healing-chaos-32328 + kubectl patch PodChaos chaos-cluster-pod-failure -n self-healing-chaos-32328 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-failure patched + read -r line ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-32328' ++ awk '{print $1}' + local kind=PodChaos ++ awk '{print $2}' ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-32328' + local name=chaos-cluster-pod-kill ++ awk '{print $3}' ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-32328' + local namespace=self-healing-chaos-32328 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-32328 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-failure" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' 
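
# The teardown above repeats one pattern for every chaos-mesh resource type:
# list the live objects, patch each one's finalizers to [] so deletion
# cannot hang on the webhooks that were just removed with the helm release,
# then force-delete the whole type. Condensed sketch of that loop (the trace
# does the same via read -r plus echo/awk):
for type in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
    kubectl get "$type" --all-namespaces --no-headers \
        -o custom-columns=Kind:.kind,Name:.metadata.name,NS:.metadata.namespace \
    | while read -r kind name ns; do
        kubectl patch "$kind" "$name" -n "$ns" --type=merge \
            -p '{"metadata":{"finalizers":[]}}'
    done
    timeout 30 kubectl delete "$type" --all --all-namespaces
done
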
+ kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-32328' ++ awk '{print $1}' + local kind=PodNetworkChaos ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-32328' ++ awk '{print $2}' + local name=some-name-rs0-0 ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-32328' ++ awk '{print $3}' + local namespace=self-healing-chaos-32328 + kubectl patch PodNetworkChaos some-name-rs0-0 -n self-healing-chaos-32328 --type=merge -p '{"metadata":{"finalizers":[]}}' podnetworkchaos.chaos-mesh.org/some-name-rs0-0 patched (no change) + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces podnetworkchaos.chaos-mesh.org "some-name-rs0-0" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org 
podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + destroy self-healing-chaos-32328 + local namespace=self-healing-chaos-32328 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.BZkRdJh3Cd ++ mktemp + local LAST_ERR=/tmp/tmp.lxovHWjLBN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BZkRdJh3Cd customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.lxovHWjLBN + rm /tmp/tmp.BZkRdJh3Cd /tmp/tmp.lxovHWjLBN + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Gt6LuXxbyT ++ mktemp + local LAST_ERR=/tmp/tmp.4lXe5LXCEy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Gt6LuXxbyT + cat /tmp/tmp.4lXe5LXCEy + rm /tmp/tmp.Gt6LuXxbyT /tmp/tmp.4lXe5LXCEy + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4d1INrQ3fd ++ mktemp + local LAST_ERR=/tmp/tmp.So3hmEkD4y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4d1INrQ3fd + cat /tmp/tmp.So3hmEkD4y + rm /tmp/tmp.4d1INrQ3fd /tmp/tmp.So3hmEkD4y + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
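
# delete_crd above runs the same finalizer-stripping pattern for each psmdb
# CRD before waiting for it to disappear; the "doesn't have a resource type"
# errors are harmless here because the CRDs were already deleted. The xargs
# form from the trace, shown for one CRD ($0 and $1 inside the sh -xc string
# receive the namespace and name columns of each listed object):
kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
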
"perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OO9bGTSGWt ++ mktemp + local LAST_ERR=/tmp/tmp.bxBKVuKU1K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OO9bGTSGWt + cat /tmp/tmp.bxBKVuKU1K + rm /tmp/tmp.OO9bGTSGWt /tmp/tmp.bxBKVuKU1K + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.HWq8qgtpHI ++ mktemp + local LAST_ERR=/tmp/tmp.37BPTeKStl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1585/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HWq8qgtpHI clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.37BPTeKStl + rm /tmp/tmp.HWq8qgtpHI /tmp/tmp.37BPTeKStl + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Pde2WPQz58 ++ mktemp + local LAST_ERR=/tmp/tmp.yDZbYJGmfc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Pde2WPQz58 + cat /tmp/tmp.yDZbYJGmfc Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Pde2WPQz58 + cat /tmp/tmp.yDZbYJGmfc Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Pde2WPQz58 + cat /tmp/tmp.yDZbYJGmfc Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.Pde2WPQz58 + cat /tmp/tmp.yDZbYJGmfc Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.Pde2WPQz58 /tmp/tmp.yDZbYJGmfc + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace self-healing-chaos-32328 + rm -rf /tmp/tmp.NNt28idPHr + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.GrehsyxrCM ++ mktemp + local LAST_OUT=/tmp/tmp.aVAdpzOSLE + local LAST_ERR=/tmp/tmp.PTGH6tUDTw + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.9NyDLXEXMS + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace self-healing-chaos-32328