Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/logs/self-healing-chaos.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + cluster=some-name-rs0 + main + create_infra self-healing-chaos-31913 + local ns=self-healing-chaos-31913 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.5qp9ieEjJt ++ mktemp + local LAST_ERR=/tmp/tmp.WLwjYz5tZg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5qp9ieEjJt customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.WLwjYz5tZg + rm /tmp/tmp.5qp9ieEjJt /tmp/tmp.WLwjYz5tZg + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.PQvNhO0uZl ++ mktemp + local LAST_ERR=/tmp/tmp.oEAUtUfroA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PQvNhO0uZl + cat /tmp/tmp.oEAUtUfroA + rm /tmp/tmp.PQvNhO0uZl /tmp/tmp.oEAUtUfroA + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0506 10:35:43.539130 18383 
memcache.go:287] couldn't get resource list for psmdb.percona.com/v1: the server could not find the requested resource E0506 10:35:43.539308 18383 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-11-0: the server could not find the requested resource E0506 10:35:43.539837 18383 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-10-0: the server could not find the requested resource E0506 10:35:43.540036 18383 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-12-0: the server could not find the requested resource error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.pbKf3WmqWH ++ mktemp + local LAST_ERR=/tmp/tmp.SQWjS8QdWZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pbKf3WmqWH + cat /tmp/tmp.SQWjS8QdWZ + rm /tmp/tmp.pbKf3WmqWH /tmp/tmp.SQWjS8QdWZ + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jYuqTCFj6e ++ mktemp + local LAST_ERR=/tmp/tmp.233InRHQMZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jYuqTCFj6e + cat /tmp/tmp.233InRHQMZ + rm /tmp/tmp.jYuqTCFj6e /tmp/tmp.233InRHQMZ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.BrWd7vJPNp ++ mktemp + local LAST_ERR=/tmp/tmp.j1M9QlcArg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BrWd7vJPNp clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.j1M9QlcArg + rm /tmp/tmp.BrWd7vJPNp /tmp/tmp.j1M9QlcArg + return 0 + check_crd_for_deletion PR-1542-e268e038 + local git_tag=PR-1542-e268e038 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1542-e268e038/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed 
'\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H1YE5IQ7KC +++ mktemp ++ local LAST_ERR=/tmp/tmp.pHOTI5t4mH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.H1YE5IQ7KC ++ cat /tmp/tmp.pHOTI5t4mH Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.H1YE5IQ7KC ++ cat /tmp/tmp.pHOTI5t4mH Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.H1YE5IQ7KC ++ cat /tmp/tmp.pHOTI5t4mH Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.H1YE5IQ7KC ++ cat /tmp/tmp.pHOTI5t4mH Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.H1YE5IQ7KC /tmp/tmp.pHOTI5t4mH ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns=self-healing-chaos-11215 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-11215 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-11215 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk 
'\''{print $1}'\'')' + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get podhttpchaos --all-namespaces --no-headers -o 
custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org 
schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.TGPBhDACDr ++ mktemp + local LAST_OUT=/tmp/tmp.0HeYZCMugO ++ mktemp + local LAST_ERR=/tmp/tmp.1CvwYPCtwf + local exit_status=0 + local timeout=4 + awk '{print$1}' ++ seq 0 2 + local LAST_ERR=/tmp/tmp.aDi9cvshVJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator 
--ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0HeYZCMugO + cat /tmp/tmp.aDi9cvshVJ + rm /tmp/tmp.0HeYZCMugO /tmp/tmp.aDi9cvshVJ + return 0 namespace "self-healing-chaos-11215" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TGPBhDACDr namespace "psmdb-operator" deleted + cat /tmp/tmp.1CvwYPCtwf + rm /tmp/tmp.TGPBhDACDr /tmp/tmp.1CvwYPCtwf + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ffX2l5CVyS ++ mktemp + local LAST_ERR=/tmp/tmp.9GA5C0CPaB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ffX2l5CVyS + cat /tmp/tmp.9GA5C0CPaB + rm /tmp/tmp.ffX2l5CVyS /tmp/tmp.9GA5C0CPaB + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.c3rAW1lN6b ++ mktemp + local LAST_ERR=/tmp/tmp.9nDZz5AHJp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c3rAW1lN6b namespace/psmdb-operator created + cat /tmp/tmp.9nDZz5AHJp + rm /tmp/tmp.c3rAW1lN6b /tmp/tmp.9nDZz5AHJp + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.HOIb5mKMjF +++ mktemp ++ local LAST_ERR=/tmp/tmp.0BwXvTTkeL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HOIb5mKMjF ++ cat /tmp/tmp.0BwXvTTkeL ++ rm /tmp/tmp.HOIb5mKMjF /tmp/tmp.0BwXvTTkeL ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.uATPhFnDJF ++ mktemp + local LAST_ERR=/tmp/tmp.X9mA84Srr4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uATPhFnDJF Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3" modified. 
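For reference, the create_infra cleanup traced above reduces to roughly the following sketch (shown for the perconaservermongodbbackups CRD only; the loop over the other CRD names, the full repository paths, and the kubectl_bin retry wrapper are omitted, so treat it as an approximation of the test helper rather than its exact code):

  # drop the old operator CRDs without waiting, then clear any stuck finalizers so deletion can finish
  kubectl delete -f deploy/crd.yaml --ignore-not-found --wait=false
  kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
      | grep -v NAMESPACE \
      | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
      || true   # "the server doesn't have a resource type" is tolerated, as seen in the log
  kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com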
+ cat /tmp/tmp.X9mA84Srr4 + rm /tmp/tmp.uATPhFnDJF /tmp/tmp.X9mA84Srr4 + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vLLk3hXEdA ++ mktemp + local LAST_ERR=/tmp/tmp.AQFHSIAeNi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vLLk3hXEdA customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.AQFHSIAeNi + rm /tmp/tmp.vLLk3hXEdA /tmp/tmp.AQFHSIAeNi + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.zYlCXxVsYv ++ mktemp + local LAST_ERR=/tmp/tmp.7nv1Y6m2bm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zYlCXxVsYv clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.7nv1Y6m2bm + rm /tmp/tmp.zYlCXxVsYv /tmp/tmp.7nv1Y6m2bm + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1542-e268e038") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.MO7M9lH6CY ++ mktemp + local LAST_ERR=/tmp/tmp.lzYxFUHeO2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MO7M9lH6CY deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.lzYxFUHeO2 + rm /tmp/tmp.MO7M9lH6CY /tmp/tmp.lzYxFUHeO2 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.6aPbptFHaE +++ mktemp ++ local LAST_ERR=/tmp/tmp.HXcdYrf34p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6aPbptFHaE ++ cat /tmp/tmp.HXcdYrf34p ++ rm /tmp/tmp.6aPbptFHaE /tmp/tmp.HXcdYrf34p ++ return 0 + wait_pod percona-server-mongodb-operator-fd9c6c884-n75tt + local pod=percona-server-mongodb-operator-fd9c6c884-n75tt + set +o xtrace waiting for pod/percona-server-mongodb-operator-fd9c6c884-n75tt to be ready.OK + create_namespace self-healing-chaos-31913 + local namespace=self-healing-chaos-31913 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces 
self-healing-chaos-31913' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces self-healing-chaos-31913 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace self-healing-chaos-31913 --ignore-not-found + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.qqCxQa0ggs + local LAST_OUT=/tmp/tmp.gd6z9sBzB0 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.4bM1yM7WmZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.cu1jeA4o7X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace self-healing-chaos-31913 --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gd6z9sBzB0 + cat /tmp/tmp.cu1jeA4o7X + rm /tmp/tmp.gd6z9sBzB0 /tmp/tmp.cu1jeA4o7X + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qqCxQa0ggs + cat /tmp/tmp.4bM1yM7WmZ + rm /tmp/tmp.qqCxQa0ggs /tmp/tmp.4bM1yM7WmZ + return 0 + kubectl_bin wait --for=delete namespace self-healing-chaos-31913 ++ mktemp + local LAST_OUT=/tmp/tmp.vF14GkkdsW ++ mktemp + local LAST_ERR=/tmp/tmp.TN2gae7fDI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace self-healing-chaos-31913 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vF14GkkdsW + cat /tmp/tmp.TN2gae7fDI + rm /tmp/tmp.vF14GkkdsW /tmp/tmp.TN2gae7fDI + return 0 + desc 'create namespace self-healing-chaos-31913' + set +o xtrace ----------------------------------------------------------------------------------- create namespace self-healing-chaos-31913 ----------------------------------------------------------------------------------- + kubectl_bin create namespace self-healing-chaos-31913 ++ mktemp + local LAST_OUT=/tmp/tmp.xrqaIp37b0 ++ mktemp + local LAST_ERR=/tmp/tmp.odt2Z3aQHW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace self-healing-chaos-31913 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xrqaIp37b0 namespace/self-healing-chaos-31913 created + cat /tmp/tmp.odt2Z3aQHW + rm /tmp/tmp.xrqaIp37b0 /tmp/tmp.odt2Z3aQHW + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.lshyMkImy4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YQlJUHY1kR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lshyMkImy4 ++ cat /tmp/tmp.YQlJUHY1kR ++ rm /tmp/tmp.lshyMkImy4 /tmp/tmp.YQlJUHY1kR ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3 --namespace=self-healing-chaos-31913 ++ mktemp + local LAST_OUT=/tmp/tmp.dNc3rfgL2l ++ mktemp + local LAST_ERR=/tmp/tmp.VB6R8nsjTw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3 --namespace=self-healing-chaos-31913 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dNc3rfgL2l Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1542-e268e038-1-cluster3" modified. + cat /tmp/tmp.VB6R8nsjTw + rm /tmp/tmp.dNc3rfgL2l /tmp/tmp.VB6R8nsjTw + return 0 + deploy_chaos_mesh self-healing-chaos-31913 + local chaos_mesh_ns=self-healing-chaos-31913 + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'install chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- install chaos-mesh ----------------------------------------------------------------------------------- + helm repo add chaos-mesh https://charts.chaos-mesh.org "chaos-mesh" already exists with the same configuration, skipping + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=self-healing-chaos-31913 --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 NAME: chaos-mesh LAST DEPLOYED: Mon May 6 10:38:00 2024 NAMESPACE: self-healing-chaos-31913 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 1. 
Make sure chaos-mesh components are running kubectl get pods --namespace self-healing-chaos-31913 -l app.kubernetes.io/instance=chaos-mesh + sleep 10 + desc 'start cluster' + set +o xtrace ----------------------------------------------------------------------------------- start cluster ----------------------------------------------------------------------------------- + setup_cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.RYETxYzxVk ++ mktemp + local LAST_ERR=/tmp/tmp.Ocu9nRXWG5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RYETxYzxVk secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.Ocu9nRXWG5 + rm /tmp/tmp.RYETxYzxVk /tmp/tmp.Ocu9nRXWG5 + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1542-e268e038"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.Wiqg3C1k0P ++ mktemp + local LAST_ERR=/tmp/tmp.m9GakgCPp0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wiqg3C1k0P perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.m9GakgCPp0 + rm /tmp/tmp.Wiqg3C1k0P /tmp/tmp.m9GakgCPp0 + return 0 + desc 'enable backups' + set +o xtrace ----------------------------------------------------------------------------------- enable backups ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.65JtsHMF85 ++ mktemp + local LAST_ERR=/tmp/tmp.pmUzI3SDRr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 
1 ']' + break + cat /tmp/tmp.65JtsHMF85 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.pmUzI3SDRr + rm /tmp/tmp.65JtsHMF85 /tmp/tmp.pmUzI3SDRr + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q0fRb87vcp +++ mktemp ++ local LAST_ERR=/tmp/tmp.P95iOwiomz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q0fRb87vcp ++ cat /tmp/tmp.P95iOwiomz ++ rm /tmp/tmp.q0fRb87vcp /tmp/tmp.P95iOwiomz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p7LpTOq18b +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fxh7OaqpKg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p7LpTOq18b ++ cat /tmp/tmp.Fxh7OaqpKg ++ rm /tmp/tmp.p7LpTOq18b /tmp/tmp.Fxh7OaqpKg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...................... 
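The cluster bring-up traced above (apply_cluster plus the backup patch) reduces to roughly this sketch; the image tags and the patch are copied from the log, while the additional yq edits for the pmm/backup/init images and the kubectl_bin retry wrapper are left out, so it is an approximation rather than the helper's exact code:

  # render the CR with the images under test, apply it, then enable backups and wait for readiness
  cat e2e-tests/conf/some-name-rs0.yml \
      | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
      | yq eval '.spec.upgradeOptions.apply="Never"' \
      | kubectl apply -f -
  kubectl patch psmdb some-name --type=merge -p '{"spec":{"backup":{"enabled":true}}}'
  # then wait until pods some-name-rs0-0..2 are Ready and the psmdb resource reports status.state == "ready"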
+ desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-31913 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-31913 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZDeOqWW0pR +++ mktemp ++ local LAST_ERR=/tmp/tmp.FO1CkXGjkL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZDeOqWW0pR ++ cat /tmp/tmp.FO1CkXGjkL ++ rm /tmp/tmp.ZDeOqWW0pR /tmp/tmp.FO1CkXGjkL ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wlPtvSjb7V ++ mktemp + local LAST_ERR=/tmp/tmp.ytjA7dDGB6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wlPtvSjb7V Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ae7e46fa-8853-4c14-91fc-6fd81c53cb75") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.ytjA7dDGB6 + rm /tmp/tmp.wlPtvSjb7V /tmp/tmp.ytjA7dDGB6 + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n6Q5RK3Jvp +++ mktemp ++ local LAST_ERR=/tmp/tmp.gJt0MbekSD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n6Q5RK3Jvp ++ cat /tmp/tmp.gJt0MbekSD ++ rm /tmp/tmp.n6Q5RK3Jvp /tmp/tmp.gJt0MbekSD ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9BXC8PuQeB ++ mktemp + local LAST_ERR=/tmp/tmp.tjrUmTZyjC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9BXC8PuQeB Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c895094f-fbe5-40d0-ba40-5ddc032444d8") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tjrUmTZyjC + rm /tmp/tmp.9BXC8PuQeB /tmp/tmp.tjrUmTZyjC + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wWih22ifp3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RkrCnmmjdk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wWih22ifp3 ++ cat /tmp/tmp.RkrCnmmjdk ++ rm /tmp/tmp.wWih22ifp3 /tmp/tmp.RkrCnmmjdk ++ return 0 + 
local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.g5JgqW8pv9 ++ mktemp + local LAST_ERR=/tmp/tmp.RmEz0iYHKC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g5JgqW8pv9 + cat /tmp/tmp.RmEz0iYHKC + rm /tmp/tmp.g5JgqW8pv9 /tmp/tmp.RmEz0iYHKC + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vbmkWmj8es +++ mktemp ++ local LAST_ERR=/tmp/tmp.ii93sETqFk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vbmkWmj8es ++ cat /tmp/tmp.ii93sETqFk ++ rm /tmp/tmp.vbmkWmj8es /tmp/tmp.ii93sETqFk ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.sP9cQZ1bCy ++ mktemp + local LAST_ERR=/tmp/tmp.LPQNLyp0Dr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sP9cQZ1bCy + cat /tmp/tmp.LPQNLyp0Dr + rm /tmp/tmp.sP9cQZ1bCy /tmp/tmp.LPQNLyp0Dr + return 0 + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JOxsujBFV4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bBTZK9l9FT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JOxsujBFV4 ++ cat /tmp/tmp.bBTZK9l9FT ++ rm /tmp/tmp.JOxsujBFV4 /tmp/tmp.bBTZK9l9FT ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9R5KUfmGtM ++ mktemp + local LAST_ERR=/tmp/tmp.hxXGKImvxT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9R5KUfmGtM + cat /tmp/tmp.hxXGKImvxT + rm /tmp/tmp.9R5KUfmGtM /tmp/tmp.hxXGKImvxT + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + desc 'recreate cluster' + set +o xtrace ----------------------------------------------------------------------------------- recreate cluster ----------------------------------------------------------------------------------- + recreate + desc 'delete PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.3zepIhE0Fc ++ mktemp + local LAST_ERR=/tmp/tmp.GbYzn6xx2E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3zepIhE0Fc perconaservermongodb.psmdb.percona.com "some-name" deleted + cat /tmp/tmp.GbYzn6xx2E + rm /tmp/tmp.3zepIhE0Fc /tmp/tmp.GbYzn6xx2E + return 0 + wait_for_delete pod/some-name-rs0-2 + local res=pod/some-name-rs0-2 + local wait_time=60 + set +o xtrace pod/some-name-rs0-2 - .........Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found Error from server (NotFound): pods "some-name-rs0-2" not found + wait_for_delete pod/some-name-rs0-1 + local res=pod/some-name-rs0-1 + local wait_time=60 + set +o xtrace pod/some-name-rs0-1 - Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found Error from server (NotFound): pods "some-name-rs0-1" not found + wait_for_delete pod/some-name-rs0-0 + local res=pod/some-name-rs0-0 + local wait_time=60 + set +o xtrace pod/some-name-rs0-0 - Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found Error from server (NotFound): pods "some-name-rs0-0" not found + desc 'create PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1542-e268e038"' + local LAST_OUT=/tmp/tmp.Nh3ZV8krjL + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.W5BAp8bZXD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nh3ZV8krjL perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.W5BAp8bZXD + rm /tmp/tmp.Nh3ZV8krjL /tmp/tmp.W5BAp8bZXD + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready...........OK + for i in 
'$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OMyzB70Ytr +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jz7IS1ckj4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OMyzB70Ytr ++ cat /tmp/tmp.Jz7IS1ckj4 ++ rm /tmp/tmp.OMyzB70Ytr /tmp/tmp.Jz7IS1ckj4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UpnYJi6iYb +++ mktemp ++ local LAST_ERR=/tmp/tmp.j2ryNEbKOe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UpnYJi6iYb ++ cat /tmp/tmp.j2ryNEbKOe ++ rm /tmp/tmp.UpnYJi6iYb /tmp/tmp.j2ryNEbKOe ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.auaIkhrujE +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZjF48CmZBU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.auaIkhrujE ++ cat /tmp/tmp.ZjF48CmZBU ++ rm /tmp/tmp.auaIkhrujE /tmp/tmp.ZjF48CmZBU ++ return 0 + [[ ready == \r\e\a\d\y ]] + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U6EEtJ40mt +++ mktemp ++ local LAST_ERR=/tmp/tmp.TzDQvrDjOV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U6EEtJ40mt ++ cat /tmp/tmp.TzDQvrDjOV ++ rm /tmp/tmp.U6EEtJ40mt /tmp/tmp.TzDQvrDjOV ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6q4SzdskGY ++ mktemp + local LAST_ERR=/tmp/tmp.spXvewGqQa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6q4SzdskGY + cat /tmp/tmp.spXvewGqQa + rm /tmp/tmp.6q4SzdskGY /tmp/tmp.spXvewGqQa + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D2IkXYwcKf +++ mktemp ++ local LAST_ERR=/tmp/tmp.CFnfsLPtBL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D2IkXYwcKf ++ cat /tmp/tmp.CFnfsLPtBL ++ rm /tmp/tmp.D2IkXYwcKf /tmp/tmp.CFnfsLPtBL ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qfFBh91zlF ++ mktemp + local LAST_ERR=/tmp/tmp.UR2mIsDj5v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qfFBh91zlF + cat /tmp/tmp.UR2mIsDj5v + rm /tmp/tmp.qfFBh91zlF /tmp/tmp.UR2mIsDj5v + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.277RcCRNrZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.spQ7233HjC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.277RcCRNrZ ++ cat /tmp/tmp.spQ7233HjC ++ rm /tmp/tmp.277RcCRNrZ /tmp/tmp.spQ7233HjC ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5EJk4F6VpS ++ mktemp + local LAST_ERR=/tmp/tmp.eYafsFuIYE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5EJk4F6VpS + cat /tmp/tmp.eYafsFuIYE + rm /tmp/tmp.5EJk4F6VpS /tmp/tmp.eYafsFuIYE + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + desc 'kill node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- kill node-0 pod ----------------------------------------------------------------------------------- + kill_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=67512 + yq eval ' .metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-31913[0] = "some-name-rs0-0"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/chaos-pod-kill.yml + kubectl apply -f - podchaos.chaos-mesh.org/chaos-cluster-pod-kill created + sleep 5 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hjQiwdR5yf +++ mktemp ++ local LAST_ERR=/tmp/tmp.CgRp6FurXp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hjQiwdR5yf ++ cat /tmp/tmp.CgRp6FurXp ++ rm /tmp/tmp.hjQiwdR5yf /tmp/tmp.CgRp6FurXp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BsVIG7iENQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.RzZnNI0yBh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BsVIG7iENQ ++ cat /tmp/tmp.RzZnNI0yBh ++ rm /tmp/tmp.BsVIG7iENQ /tmp/tmp.RzZnNI0yBh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YkFwOydxTy +++ mktemp ++ local LAST_ERR=/tmp/tmp.GC1ujX4gkS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YkFwOydxTy ++ cat /tmp/tmp.GC1ujX4gkS ++ rm /tmp/tmp.YkFwOydxTy /tmp/tmp.GC1ujX4gkS ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_pod_restarted some-name-rs0-0 67512 + local pod=some-name-rs0-0 + local old_resourceVersion=67512 ++ kubectl get pod some-name-rs0-0 -ojson ++ tr -d '"' ++ jq .metadata.resourceVersion + local new_resourceVersion=68593 + [[ 67512 == \6\8\5\9\3 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local command=find + local 
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.twUgopHP1T +++ mktemp ++ local LAST_ERR=/tmp/tmp.BG1jGJqMkH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.twUgopHP1T ++ cat /tmp/tmp.BG1jGJqMkH ++ rm /tmp/tmp.twUgopHP1T /tmp/tmp.BG1jGJqMkH ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QtDo60ya0E ++ mktemp + local LAST_ERR=/tmp/tmp.ck0EcVL0K4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QtDo60ya0E + cat /tmp/tmp.ck0EcVL0K4 + rm /tmp/tmp.QtDo60ya0E /tmp/tmp.ck0EcVL0K4 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find.json /tmp/tmp.75ssWTTcnr/find + desc 'fail node-0 pod for 60s' + set +o xtrace ----------------------------------------------------------------------------------- fail node-0 pod for 60s ----------------------------------------------------------------------------------- + failure_pod some-name-rs0-0 + local pod=some-name-rs0-0 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local old_resourceVersion=68593 + kubectl apply -f - + yq eval ' .metadata.name = "chaos-cluster-pod-failure" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-31913[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/chaos-pod-failure.yml podchaos.chaos-mesh.org/chaos-cluster-pod-failure created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local 'command=use myApp\n 
db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OZCf0saCEr +++ mktemp ++ local LAST_ERR=/tmp/tmp.U4SN23DHjw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OZCf0saCEr ++ cat /tmp/tmp.U4SN23DHjw ++ rm /tmp/tmp.OZCf0saCEr /tmp/tmp.U4SN23DHjw ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.eT5qdmMEKW ++ mktemp + local LAST_ERR=/tmp/tmp.gwVfnqSz4p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eT5qdmMEKW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e2c42131-f88d-4fcc-b6ae-e4cd7c598f17") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.gwVfnqSz4p + rm /tmp/tmp.eT5qdmMEKW /tmp/tmp.gwVfnqSz4p + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EgmiGjcu7e +++ mktemp ++ local LAST_ERR=/tmp/tmp.4vf8bd7q6X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EgmiGjcu7e ++ cat /tmp/tmp.4vf8bd7q6X ++ rm /tmp/tmp.EgmiGjcu7e /tmp/tmp.4vf8bd7q6X ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local 
pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YuagjwGSYo +++ mktemp ++ local LAST_ERR=/tmp/tmp.fhhwUyqcpn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YuagjwGSYo ++ cat /tmp/tmp.fhhwUyqcpn ++ rm /tmp/tmp.YuagjwGSYo /tmp/tmp.fhhwUyqcpn ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................... + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7mUqTGKP9M +++ mktemp ++ local LAST_ERR=/tmp/tmp.sOoqZEQ9jc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7mUqTGKP9M ++ cat /tmp/tmp.sOoqZEQ9jc ++ rm /tmp/tmp.7mUqTGKP9M /tmp/tmp.sOoqZEQ9jc ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_pod_restarted some-name-rs0-0 68593 + local pod=some-name-rs0-0 + local old_resourceVersion=68593 ++ kubectl get pod some-name-rs0-0 -ojson ++ jq .metadata.resourceVersion ++ tr -d '"' + local new_resourceVersion=69536 + [[ 68593 == \6\9\5\3\6 ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace ----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7MvNoIgmhI +++ mktemp ++ local LAST_ERR=/tmp/tmp.LhFBjEEaze ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7MvNoIgmhI ++ cat /tmp/tmp.LhFBjEEaze ++ rm /tmp/tmp.7MvNoIgmhI /tmp/tmp.LhFBjEEaze ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 
== *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XIGFrxkQms ++ mktemp + local LAST_ERR=/tmp/tmp.lvtIsSXmwJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XIGFrxkQms + cat /tmp/tmp.lvtIsSXmwJ + rm /tmp/tmp.XIGFrxkQms /tmp/tmp.lvtIsSXmwJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find-2nd.json /tmp/tmp.75ssWTTcnr/find-2nd + desc 'emulate bad network node-0 pod' + set +o xtrace ----------------------------------------------------------------------------------- emulate bad network node-0 pod ----------------------------------------------------------------------------------- + network_loss some-name-rs0-0 + local pod=some-name-rs0-0 + yq eval ' .metadata.name = "chaos-cluster-network-loss" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.self-healing-chaos-31913[0] = "some-name-rs0-0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/conf/chaos-network-loss.yml + kubectl apply -f - networkchaos.chaos-mesh.org/chaos-cluster-network-loss created + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.self-healing-chaos-31913 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n0dagq0MXf +++ mktemp ++ local LAST_ERR=/tmp/tmp.0wT3gb1oFs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n0dagq0MXf ++ cat /tmp/tmp.0wT3gb1oFs ++ rm /tmp/tmp.n0dagq0MXf /tmp/tmp.0wT3gb1oFs ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BhcKHLHWc7 ++ mktemp + local LAST_ERR=/tmp/tmp.wZCk8hcJXb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BhcKHLHWc7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.self-healing-chaos-31913.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ed15bf5f-3172-450f-88b4-380ed35f4a2e") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.wZCk8hcJXb + rm /tmp/tmp.BhcKHLHWc7 /tmp/tmp.wZCk8hcJXb + return 0 + sleep 60 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tD9vEDp7Cz +++ mktemp ++ local LAST_ERR=/tmp/tmp.yT3Ixf7qvV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tD9vEDp7Cz ++ cat /tmp/tmp.yT3Ixf7qvV ++ rm /tmp/tmp.tD9vEDp7Cz /tmp/tmp.yT3Ixf7qvV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QHSx9RJQUl +++ mktemp ++ local LAST_ERR=/tmp/tmp.cGrDT0yH5X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QHSx9RJQUl ++ cat /tmp/tmp.cGrDT0yH5X ++ rm /tmp/tmp.QHSx9RJQUl /tmp/tmp.cGrDT0yH5X ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z7hj2aFIQQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.20IrvuCUOE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z7hj2aFIQQ ++ cat /tmp/tmp.20IrvuCUOE ++ rm /tmp/tmp.z7hj2aFIQQ /tmp/tmp.20IrvuCUOE ++ return 0 + [[ ready == \r\e\a\d\y ]] + desc 'check data consistency for chaosed Pod' + set +o xtrace 
----------------------------------------------------------------------------------- check data consistency for chaosed Pod ----------------------------------------------------------------------------------- + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rrW9XpElXV +++ mktemp ++ local LAST_ERR=/tmp/tmp.L1FyndFJta ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rrW9XpElXV ++ cat /tmp/tmp.L1FyndFJta ++ rm /tmp/tmp.rrW9XpElXV /tmp/tmp.L1FyndFJta ++ return 0 + local client_container=psmdb-client-5dc94d5b48-h5x7c + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ikDFVzAiWU ++ mktemp + local LAST_ERR=/tmp/tmp.qckAeyhOke + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5dc94d5b48-h5x7c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.self-healing-chaos-31913.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ikDFVzAiWU + cat /tmp/tmp.qckAeyhOke + rm /tmp/tmp.ikDFVzAiWU /tmp/tmp.qckAeyhOke + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/e2e-tests/self-healing-chaos/compare/find-3rd.json /tmp/tmp.75ssWTTcnr/find-3rd + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns=self-healing-chaos-31913 + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n self-healing-chaos-31913 ']' + helm uninstall --wait --timeout 60s chaos-mesh --namespace self-healing-chaos-31913 release "chaos-mesh" uninstalled ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get 
ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete awschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete azurechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete blockchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete dnschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete gcpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete httpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete iochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete jvmchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete kernelchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-31913' ++ 
awk '{print $1}' + local kind=NetworkChaos ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-31913' ++ awk '{print $2}' + local name=chaos-cluster-network-loss ++ echo 'NetworkChaos chaos-cluster-network-loss self-healing-chaos-31913' ++ awk '{print $3}' + local namespace=self-healing-chaos-31913 + kubectl patch NetworkChaos chaos-cluster-network-loss -n self-healing-chaos-31913 --type=merge -p '{"metadata":{"finalizers":[]}}' networkchaos.chaos-mesh.org/chaos-cluster-network-loss patched + read -r line + timeout 30 kubectl delete networkchaos --all --all-namespaces networkchaos.chaos-mesh.org "chaos-cluster-network-loss" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete physicalmachines --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-31913' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-31913' ++ awk '{print $2}' + local name=chaos-cluster-pod-failure ++ echo 'PodChaos chaos-cluster-pod-failure self-healing-chaos-31913' ++ awk '{print $3}' + local namespace=self-healing-chaos-31913 + kubectl patch PodChaos chaos-cluster-pod-failure -n self-healing-chaos-31913 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-failure patched + read -r line ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-31913' ++ awk '{print $1}' + local kind=PodChaos ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-31913' ++ awk '{print $2}' + local name=chaos-cluster-pod-kill ++ echo 'PodChaos chaos-cluster-pod-kill self-healing-chaos-31913' ++ awk '{print $3}' + local namespace=self-healing-chaos-31913 + kubectl patch PodChaos chaos-cluster-pod-kill -n self-healing-chaos-31913 --type=merge -p '{"metadata":{"finalizers":[]}}' podchaos.chaos-mesh.org/chaos-cluster-pod-kill patched + read -r line + timeout 30 kubectl delete podchaos --all --all-namespaces podchaos.chaos-mesh.org "chaos-cluster-pod-failure" deleted podchaos.chaos-mesh.org "chaos-cluster-pod-kill" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podhttpchaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete podiochaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' 
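The per-kind loop above strips finalizers from every remaining chaos-mesh object (the NetworkChaos, PodChaos and PodNetworkChaos resources created earlier in the test) before deleting it, because the chaos-mesh controller was already uninstalled and would otherwise never clear those finalizers, leaving each delete to hang until its 30s timeout. A minimal standalone sketch of the same pattern, assuming only kubectl access to the cluster; kind discovery and the patch payload follow the trace above, while the two-column output layout is simplified for illustration:

  # For every chaos-mesh resource kind, clear finalizers on each object,
  # then delete all objects of that kind with a bounded timeout.
  for kind in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
    kubectl get "$kind" --all-namespaces --no-headers \
      -o custom-columns=Name:.metadata.name,NAMESPACE:.metadata.namespace |
    while read -r name namespace; do
      # Emptying metadata.finalizers lets the API server remove the object
      # even though the chaos-mesh controller is already gone.
      kubectl patch "$kind" "$name" -n "$namespace" --type=merge \
        -p '{"metadata":{"finalizers":[]}}'
    done
    timeout 30 kubectl delete "$kind" --all --all-namespaces || true
  done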
+ kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line ++ awk '{print $1}' ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-31913' + local kind=PodNetworkChaos ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-31913' ++ awk '{print $2}' + local name=some-name-rs0-0 ++ echo 'PodNetworkChaos some-name-rs0-0 self-healing-chaos-31913' ++ awk '{print $3}' + local namespace=self-healing-chaos-31913 + kubectl patch PodNetworkChaos some-name-rs0-0 -n self-healing-chaos-31913 --type=merge -p '{"metadata":{"finalizers":[]}}' podnetworkchaos.chaos-mesh.org/some-name-rs0-0 patched (no change) + read -r line + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces podnetworkchaos.chaos-mesh.org "some-name-rs0-0" deleted + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete remoteclusters --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + read -r line + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + timeout 30 kubectl delete schedules --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete statuschecks --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete stresschaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete timechaos --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflownodes --all --all-namespaces No resources found + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace + read -r line + timeout 30 kubectl delete workflows --all --all-namespaces No resources found ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org 
podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + destroy self-healing-chaos-31913 + local namespace=self-healing-chaos-31913 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.anbZnG718A ++ mktemp + local LAST_ERR=/tmp/tmp.QuzRED8h91 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.anbZnG718A customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.QuzRED8h91 + rm /tmp/tmp.anbZnG718A /tmp/tmp.QuzRED8h91 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JaCfL1Gkz0 ++ mktemp + local LAST_ERR=/tmp/tmp.nng1ojH5ax + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JaCfL1Gkz0 + cat /tmp/tmp.nng1ojH5ax + rm /tmp/tmp.JaCfL1Gkz0 /tmp/tmp.nng1ojH5ax + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.AAVtRJYrMA ++ mktemp + local LAST_ERR=/tmp/tmp.Ioiv1HT9CW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AAVtRJYrMA + cat /tmp/tmp.Ioiv1HT9CW + rm /tmp/tmp.AAVtRJYrMA /tmp/tmp.Ioiv1HT9CW + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1Cs7SkYZGV ++ mktemp + local LAST_ERR=/tmp/tmp.oooA31DLV0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1Cs7SkYZGV + cat /tmp/tmp.oooA31DLV0 + rm /tmp/tmp.1Cs7SkYZGV /tmp/tmp.oooA31DLV0 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oTaDTGTwAF ++ mktemp + local LAST_ERR=/tmp/tmp.P5R7hDIWG7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1542/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oTaDTGTwAF clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.P5R7hDIWG7 + rm /tmp/tmp.oTaDTGTwAF /tmp/tmp.P5R7hDIWG7 + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml + : + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace self-healing-chaos-31913 + rm -rf /tmp/tmp.75ssWTTcnr + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace + local LAST_OUT=/tmp/tmp.FlE83fLJJn ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.4KEwnkN3p6 ++ mktemp + local LAST_ERR=/tmp/tmp.gZjJgb0Z2t + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.7L8ppauQB1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace self-healing-chaos-31913 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator