Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/logs/upgrade-consistency-8-0.log
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ cluster=some-name
+ main
+ create_infra upgrade-consistency-25326
+ local ns=upgrade-consistency-25326
+ '[' -n pxc-operator ']'
+ kubectl get pxc --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n upgrade-consistency-10928 some-name --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/some-name patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.4E1PquKe5Y
++ mktemp
+ local LAST_ERR=/tmp/tmp.orEtalcEf0
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.4E1PquKe5Y
perconaxtradbcluster.pxc.percona.com "some-name" deleted from upgrade-consistency-10928 namespace
+ cat /tmp/tmp.orEtalcEf0
+ rm /tmp/tmp.4E1PquKe5Y /tmp/tmp.orEtalcEf0
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.VC9T2zZywI
++ mktemp
+ local LAST_ERR=/tmp/tmp.k3ZiIeXfvw
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.VC9T2zZywI
No resources found
+ cat /tmp/tmp.k3ZiIeXfvw
+ rm /tmp/tmp.VC9T2zZywI /tmp/tmp.k3ZiIeXfvw
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.P14VOSASKK
++ mktemp
+ local LAST_ERR=/tmp/tmp.FGszgXJgMf
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.P14VOSASKK
No resources found
+ cat /tmp/tmp.FGszgXJgMf
+ rm /tmp/tmp.P14VOSASKK /tmp/tmp.FGszgXJgMf
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.wNSDIPHroC ++ mktemp + local LAST_ERR=/tmp/tmp.v2ga2YhoYW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.4sneJ1qo0F ++ mktemp + local LAST_ERR=/tmp/tmp.SkewyYcM67 + local exit_status=0 ++ seq 0 2 + awk '{print$1}' + xargs kubectl delete ns + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4sneJ1qo0F + cat /tmp/tmp.SkewyYcM67 + rm /tmp/tmp.4sneJ1qo0F /tmp/tmp.SkewyYcM67 + return 0 namespace "upgrade-consistency-10928" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wNSDIPHroC namespace "pxc-operator" deleted + cat /tmp/tmp.v2ga2YhoYW + rm /tmp/tmp.wNSDIPHroC /tmp/tmp.v2ga2YhoYW + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.q49i3bIsdR ++ mktemp + local LAST_ERR=/tmp/tmp.7t0g9kij9C + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.q49i3bIsdR namespace/pxc-operator created + cat /tmp/tmp.7t0g9kij9C + rm /tmp/tmp.q49i3bIsdR /tmp/tmp.7t0g9kij9C + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.C0wfT74H32 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MWUx6zAB13 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.C0wfT74H32 ++ cat /tmp/tmp.MWUx6zAB13 ++ rm /tmp/tmp.C0wfT74H32 /tmp/tmp.MWUx6zAB13 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EVDd9AFqer ++ mktemp + local LAST_ERR=/tmp/tmp.fjIf6vK14G + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EVDd9AFqer Context 
"gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9" modified. + cat /tmp/tmp.fjIf6vK14G + rm /tmp/tmp.EVDd9AFqer /tmp/tmp.fjIf6vK14G + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.n5pkjQ5WST ++ mktemp + local LAST_ERR=/tmp/tmp.JqG1wg3SGD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.n5pkjQ5WST customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.JqG1wg3SGD + rm /tmp/tmp.n5pkjQ5WST /tmp/tmp.JqG1wg3SGD + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.skG9eEtUWl ++ mktemp + local LAST_ERR=/tmp/tmp.o5sQMDWCmj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.skG9eEtUWl clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.o5sQMDWCmj + rm /tmp/tmp.skG9eEtUWl /tmp/tmp.o5sQMDWCmj + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2002-42929599^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' ++ mktemp + local LAST_OUT=/tmp/tmp.dJBTsbtvss ++ mktemp + local LAST_ERR=/tmp/tmp.BYdBCIRAqD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dJBTsbtvss deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.BYdBCIRAqD + rm /tmp/tmp.dJBTsbtvss /tmp/tmp.BYdBCIRAqD + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.TPjvRvPMSw ++ mktemp + local LAST_ERR=/tmp/tmp.jkhRumWWSv + local exit_status=0 ++ seq 0 
2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TPjvRvPMSw pod/percona-xtradb-cluster-operator-6c8dc776-5l9tn condition met + cat /tmp/tmp.jkhRumWWSv + rm /tmp/tmp.TPjvRvPMSw /tmp/tmp.jkhRumWWSv + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1noH9Bl2GH +++ mktemp ++ local LAST_ERR=/tmp/tmp.igP9p54Jdk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1noH9Bl2GH ++ cat /tmp/tmp.igP9p54Jdk ++ rm /tmp/tmp.1noH9Bl2GH /tmp/tmp.igP9p54Jdk ++ return 0 + wait_pod percona-xtradb-cluster-operator-6c8dc776-5l9tn 480 pxc-operator + local pod=percona-xtradb-cluster-operator-6c8dc776-5l9tn + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo percona-xtradb-cluster-operator-6c8dc776-5l9tn + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-6c8dc776-5l9tn condition met waiting for pod/percona-xtradb-cluster-operator-6c8dc776-5l9tn to become Ready.Ok + sleep 3 + create_namespace upgrade-consistency-25326 + local namespace=upgrade-consistency-25326 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-consistency-25326' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-consistency-25326 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-consistency-25326 ++ mktemp + local LAST_OUT=/tmp/tmp.IROjAcU6Vj + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.vf1fTyuRzZ + local exit_status=0 + local LAST_OUT=/tmp/tmp.XjmVyAUVKQ ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-consistency-25326 ++ mktemp + local LAST_ERR=/tmp/tmp.TzdwsZKHzD + local exit_status=0 + awk '{print$1}' + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-consistency-25326 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XjmVyAUVKQ + cat /tmp/tmp.TzdwsZKHzD + rm /tmp/tmp.XjmVyAUVKQ /tmp/tmp.TzdwsZKHzD + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-consistency-25326 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.IROjAcU6Vj + cat /tmp/tmp.vf1fTyuRzZ Error from server (NotFound): namespaces "upgrade-consistency-25326" not found + rm /tmp/tmp.IROjAcU6Vj /tmp/tmp.vf1fTyuRzZ + return 1 + : + wait_for_delete namespace/upgrade-consistency-25326 + local res=namespace/upgrade-consistency-25326 + echo -n 'waiting for namespace/upgrade-consistency-25326 to be deleted' waiting for namespace/upgrade-consistency-25326 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "upgrade-consistency-25326" not found + desc 'create namespace upgrade-consistency-25326' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-consistency-25326 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-consistency-25326 ++ mktemp + local LAST_OUT=/tmp/tmp.tsJvjx2CsW ++ mktemp + local LAST_ERR=/tmp/tmp.x5xHNmdgGJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-consistency-25326 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tsJvjx2CsW namespace/upgrade-consistency-25326 created + cat /tmp/tmp.x5xHNmdgGJ + rm /tmp/tmp.tsJvjx2CsW /tmp/tmp.x5xHNmdgGJ + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.DZc6RYLir1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gQXxXlQNCn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DZc6RYLir1 ++ cat /tmp/tmp.gQXxXlQNCn ++ rm /tmp/tmp.DZc6RYLir1 /tmp/tmp.gQXxXlQNCn ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9 --namespace=upgrade-consistency-25326 ++ mktemp + local LAST_OUT=/tmp/tmp.oRwRsEitIw ++ mktemp + local LAST_ERR=/tmp/tmp.eaoERbWibn + local exit_status=0 ++ seq 0 2 + for i 
in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9 --namespace=upgrade-consistency-25326 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oRwRsEitIw Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster9" modified. + cat /tmp/tmp.eaoERbWibn + rm /tmp/tmp.oRwRsEitIw /tmp/tmp.eaoERbWibn + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ahMavx4VVO ++ mktemp + local LAST_ERR=/tmp/tmp.A03g8a3Xh7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ahMavx4VVO secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.A03g8a3Xh7 + rm /tmp/tmp.ahMavx4VVO /tmp/tmp.A03g8a3Xh7 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2002-42929599#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BXVmXM47zf + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-consistency-25326~ + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.fRt3uCbjal + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BXVmXM47zf deployment.apps/pxc-client created + cat /tmp/tmp.fRt3uCbjal + rm /tmp/tmp.BXVmXM47zf /tmp/tmp.fRt3uCbjal + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Tu1XSFY59I ++ mktemp + local LAST_ERR=/tmp/tmp.zGSE81QfxU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + 
kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Tu1XSFY59I secret/my-cluster-secrets created + cat /tmp/tmp.zGSE81QfxU + rm /tmp/tmp.Tu1XSFY59I /tmp/tmp.zGSE81QfxU + return 0 + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/some-name.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/some-name.yml + yq eval '.spec.crVersion = "1.17.0"' - + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.Ol0JPdXgEY + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2002-42929599#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_ERR=/tmp/tmp.1NAQAiRSBC + local exit_status=0 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-consistency-25326~ ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ol0JPdXgEY perconaxtradbcluster.pxc.percona.com/some-name created + cat /tmp/tmp.1NAQAiRSBC + rm /tmp/tmp.Ol0JPdXgEY /tmp/tmp.1NAQAiRSBC + return 0 + desc 'test 1.17.0' + set +o xtrace ----------------------------------------------------------------------------------- test 1.17.0 ----------------------------------------------------------------------------------- + kubectl_bin patch pxc some-name --type=merge --patch '{ "spec": {"crVersion":"1.17.0"} }' ++ mktemp + local LAST_OUT=/tmp/tmp.yUpw45rrBl ++ mktemp + local LAST_ERR=/tmp/tmp.dJjdRFXsLy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc some-name --type=merge --patch '{ "spec": {"crVersion":"1.17.0"} }' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yUpw45rrBl perconaxtradbcluster.pxc.percona.com/some-name patched + cat /tmp/tmp.dJjdRFXsLy + rm /tmp/tmp.yUpw45rrBl /tmp/tmp.dJjdRFXsLy + return 0 + wait_cluster_consistency some-name 3 2 + local cluster_name=some-name + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/some-name to be ready' waiting for pxc/some-name to be ready++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QIbFEKthce +++ mktemp ++ local LAST_ERR=/tmp/tmp.2AcaTQBWF7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc 
some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QIbFEKthce ++ cat /tmp/tmp.2AcaTQBWF7 ++ rm /tmp/tmp.QIbFEKthce /tmp/tmp.2AcaTQBWF7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ckGFv4L4lV +++ mktemp ++ local LAST_ERR=/tmp/tmp.fOYC8Zvpb1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ckGFv4L4lV ++ cat /tmp/tmp.fOYC8Zvpb1 ++ rm /tmp/tmp.ckGFv4L4lV /tmp/tmp.fOYC8Zvpb1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dxWj0cqKoU +++ mktemp ++ local LAST_ERR=/tmp/tmp.T8oxQbgwTg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dxWj0cqKoU ++ cat /tmp/tmp.T8oxQbgwTg ++ rm /tmp/tmp.dxWj0cqKoU /tmp/tmp.T8oxQbgwTg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fNdVcrtfTu +++ mktemp ++ local LAST_ERR=/tmp/tmp.I7awfm5ssj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fNdVcrtfTu ++ cat /tmp/tmp.I7awfm5ssj ++ rm /tmp/tmp.fNdVcrtfTu /tmp/tmp.I7awfm5ssj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wB6TL6BdWx +++ mktemp ++ local LAST_ERR=/tmp/tmp.5nwcbWmkQk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wB6TL6BdWx ++ cat /tmp/tmp.5nwcbWmkQk ++ rm /tmp/tmp.wB6TL6BdWx /tmp/tmp.5nwcbWmkQk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JAJsIVPGx1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JVA9kJ3PYL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JAJsIVPGx1 ++ cat /tmp/tmp.JVA9kJ3PYL ++ rm /tmp/tmp.JAJsIVPGx1 /tmp/tmp.JVA9kJ3PYL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LeMwc7hm1R +++ mktemp ++ local LAST_ERR=/tmp/tmp.gbCK4BoK8j ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LeMwc7hm1R ++ cat /tmp/tmp.gbCK4BoK8j ++ rm /tmp/tmp.LeMwc7hm1R /tmp/tmp.gbCK4BoK8j ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YkJghsBpPu +++ mktemp ++ local LAST_ERR=/tmp/tmp.3EM0yftvQ2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YkJghsBpPu ++ cat /tmp/tmp.3EM0yftvQ2 ++ rm /tmp/tmp.YkJghsBpPu /tmp/tmp.3EM0yftvQ2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.54fTxc3dKT +++ mktemp ++ local LAST_ERR=/tmp/tmp.c6RXosKHKI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.54fTxc3dKT ++ cat /tmp/tmp.c6RXosKHKI ++ rm /tmp/tmp.54fTxc3dKT /tmp/tmp.c6RXosKHKI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IzkImeCA1Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.XMzNoAYpeo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IzkImeCA1Z ++ cat /tmp/tmp.XMzNoAYpeo ++ rm /tmp/tmp.IzkImeCA1Z /tmp/tmp.XMzNoAYpeo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pKmNqpJzMB +++ mktemp ++ local LAST_ERR=/tmp/tmp.GVlBgGjpVX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pKmNqpJzMB ++ cat /tmp/tmp.GVlBgGjpVX ++ rm /tmp/tmp.pKmNqpJzMB /tmp/tmp.GVlBgGjpVX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UMsd94fAuc +++ mktemp ++ local LAST_ERR=/tmp/tmp.1UT2ubZwmB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UMsd94fAuc ++ cat /tmp/tmp.1UT2ubZwmB ++ rm /tmp/tmp.UMsd94fAuc /tmp/tmp.1UT2ubZwmB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6lmgDLh6wI +++ mktemp ++ local LAST_ERR=/tmp/tmp.LLRk3fHP1L ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6lmgDLh6wI ++ cat /tmp/tmp.LLRk3fHP1L ++ rm /tmp/tmp.6lmgDLh6wI /tmp/tmp.LLRk3fHP1L ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SDqhoXZt4z +++ mktemp ++ local LAST_ERR=/tmp/tmp.f6tV80BzqK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SDqhoXZt4z ++ cat /tmp/tmp.f6tV80BzqK ++ rm /tmp/tmp.SDqhoXZt4z /tmp/tmp.f6tV80BzqK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.upSQTsehUF +++ mktemp ++ local LAST_ERR=/tmp/tmp.QYDtwzvxbH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.upSQTsehUF ++ cat /tmp/tmp.QYDtwzvxbH ++ rm /tmp/tmp.upSQTsehUF /tmp/tmp.QYDtwzvxbH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QuuCMvuupY +++ mktemp ++ local LAST_ERR=/tmp/tmp.FaEfngVqun ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QuuCMvuupY ++ cat /tmp/tmp.FaEfngVqun ++ rm /tmp/tmp.QuuCMvuupY /tmp/tmp.FaEfngVqun ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WqOd4kg7Iw +++ mktemp ++ local LAST_ERR=/tmp/tmp.2N8NCSPErQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WqOd4kg7Iw ++ cat /tmp/tmp.2N8NCSPErQ ++ rm /tmp/tmp.WqOd4kg7Iw /tmp/tmp.2N8NCSPErQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z3HLGBByfr +++ mktemp ++ local LAST_ERR=/tmp/tmp.D03DQ4KO5V ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.z3HLGBByfr ++ cat /tmp/tmp.D03DQ4KO5V ++ rm /tmp/tmp.z3HLGBByfr /tmp/tmp.D03DQ4KO5V ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 17 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XJybKM3ohO +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gm7ZFj5wzn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XJybKM3ohO ++ cat /tmp/tmp.Gm7ZFj5wzn ++ rm /tmp/tmp.XJybKM3ohO /tmp/tmp.Gm7ZFj5wzn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 18 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1n3GAyWCkJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.4UhjyGzh75 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1n3GAyWCkJ ++ cat /tmp/tmp.4UhjyGzh75 ++ rm /tmp/tmp.1n3GAyWCkJ /tmp/tmp.4UhjyGzh75 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 19 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MRzg7Nc4bw +++ mktemp ++ local LAST_ERR=/tmp/tmp.BdEd12xt4F ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MRzg7Nc4bw ++ cat /tmp/tmp.BdEd12xt4F ++ rm /tmp/tmp.MRzg7Nc4bw /tmp/tmp.BdEd12xt4F ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 20 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vpvIP9r1Wt +++ mktemp ++ local LAST_ERR=/tmp/tmp.XeOcRcVFX2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vpvIP9r1Wt ++ cat /tmp/tmp.XeOcRcVFX2 ++ rm /tmp/tmp.vpvIP9r1Wt /tmp/tmp.XeOcRcVFX2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 21 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RGCUw07s1k +++ mktemp ++ local LAST_ERR=/tmp/tmp.Joordf2G05 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RGCUw07s1k ++ cat /tmp/tmp.Joordf2G05 ++ rm /tmp/tmp.RGCUw07s1k /tmp/tmp.Joordf2G05 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 22 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LaUaIZ5JFe +++ mktemp ++ local LAST_ERR=/tmp/tmp.SHeyYT1SIt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LaUaIZ5JFe ++ cat /tmp/tmp.SHeyYT1SIt ++ rm /tmp/tmp.LaUaIZ5JFe /tmp/tmp.SHeyYT1SIt ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 23 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.elZIS1Mvyz +++ mktemp ++ local LAST_ERR=/tmp/tmp.TbjkWK1GTj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.elZIS1Mvyz ++ cat /tmp/tmp.TbjkWK1GTj ++ rm /tmp/tmp.elZIS1Mvyz /tmp/tmp.TbjkWK1GTj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 24 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AbAqkqa5YS +++ mktemp ++ local LAST_ERR=/tmp/tmp.dwzDEmQcYr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AbAqkqa5YS ++ cat /tmp/tmp.dwzDEmQcYr ++ rm /tmp/tmp.AbAqkqa5YS /tmp/tmp.dwzDEmQcYr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 25 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2DHJnHisyh +++ mktemp ++ local LAST_ERR=/tmp/tmp.VeFsUz3aYE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2DHJnHisyh ++ cat /tmp/tmp.VeFsUz3aYE ++ rm /tmp/tmp.2DHJnHisyh /tmp/tmp.VeFsUz3aYE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 26 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.35LfwPMnkQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.iAoc4pq1dV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.35LfwPMnkQ ++ cat /tmp/tmp.iAoc4pq1dV ++ rm /tmp/tmp.35LfwPMnkQ /tmp/tmp.iAoc4pq1dV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 27 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LRvtHLz9HF +++ mktemp ++ local LAST_ERR=/tmp/tmp.WD2iSHgTnc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LRvtHLz9HF ++ cat /tmp/tmp.WD2iSHgTnc ++ rm /tmp/tmp.LRvtHLz9HF /tmp/tmp.WD2iSHgTnc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 28 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k66noj87a2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FOd0JKTnWG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.k66noj87a2 ++ cat /tmp/tmp.FOd0JKTnWG ++ rm /tmp/tmp.k66noj87a2 /tmp/tmp.FOd0JKTnWG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 29 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C8kdNkAeJ9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.E0BGMZvOT7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.C8kdNkAeJ9 ++ cat /tmp/tmp.E0BGMZvOT7 ++ rm /tmp/tmp.C8kdNkAeJ9 /tmp/tmp.E0BGMZvOT7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 30 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y1h0IMdDZG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZyOC7cQARK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Y1h0IMdDZG ++ cat /tmp/tmp.ZyOC7cQARK ++ rm /tmp/tmp.Y1h0IMdDZG /tmp/tmp.ZyOC7cQARK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 31 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5BNlvnVZ76 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Twb3jNjXjB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5BNlvnVZ76 ++ cat /tmp/tmp.Twb3jNjXjB ++ rm /tmp/tmp.5BNlvnVZ76 /tmp/tmp.Twb3jNjXjB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 32 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SYR1V21uST +++ mktemp ++ local LAST_ERR=/tmp/tmp.l7zk5xeDfM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SYR1V21uST ++ cat /tmp/tmp.l7zk5xeDfM ++ rm /tmp/tmp.SYR1V21uST /tmp/tmp.l7zk5xeDfM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 33 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V10J5snALB +++ mktemp ++ local LAST_ERR=/tmp/tmp.dS4jOvzi8i ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.V10J5snALB ++ cat /tmp/tmp.dS4jOvzi8i ++ rm /tmp/tmp.V10J5snALB /tmp/tmp.dS4jOvzi8i ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AsyldoeAlw +++ mktemp ++ local LAST_ERR=/tmp/tmp.KN6gpMWyrk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AsyldoeAlw ++ cat /tmp/tmp.KN6gpMWyrk ++ rm /tmp/tmp.AsyldoeAlw /tmp/tmp.KN6gpMWyrk ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine some-name +++ local cluster_name=some-name ++++ get_proxy some-name ++++ local target_cluster=some-name +++++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.eumILtj8OA ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.q3apypPPr2 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.eumILtj8OA +++++ cat /tmp/tmp.q3apypPPr2 +++++ rm /tmp/tmp.eumILtj8OA /tmp/tmp.q3apypPPr2 +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.cMbTAi7Tuz ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.hh0tOjTkUH +++++ local exit_status=0 ++++++ seq 0 2 
+++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.cMbTAi7Tuz +++++ cat /tmp/tmp.hh0tOjTkUH +++++ rm /tmp/tmp.cMbTAi7Tuz /tmp/tmp.hh0tOjTkUH +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo some-name-proxysql ++++ return +++ local cluster_proxy=some-name-proxysql +++ echo proxysql ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZvAKG3Lz9F +++ mktemp ++ local LAST_ERR=/tmp/tmp.iRt8wNdSTb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZvAKG3Lz9F ++ cat /tmp/tmp.iRt8wNdSTb ++ rm /tmp/tmp.ZvAKG3Lz9F /tmp/tmp.iRt8wNdSTb ++ return 0 + [[ 2 == \2 ]] + echo + wait_for_sts_generation some-name-pxc 1 1 + local name=some-name-pxc + local desired_generation=1 + local size=1 + set +o xtrace Waiting for statefulset readyness + desc 'compare k8s objects' + set +o xtrace ----------------------------------------------------------------------------------- compare k8s objects ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-pxc -1170 + local resource=service/some-name-pxc + local postfix=-1170 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170.yml + local new_result=/tmp/tmp.bg7yHYufCv/service_some-name-pxc.yml + desc 'compare service/some-name-pxc--1170' + set +o xtrace ----------------------------------------------------------------------------------- compare service/some-name-pxc--1170 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired 
----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.24' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170-aks.yml ']' + kubectl_bin get -o yaml service/some-name-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.h4kDOhWhPt ++ mktemp + local LAST_ERR=/tmp/tmp.N09frM1a9t + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/some-name-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.h4kDOhWhPt + cat /tmp/tmp.N09frM1a9t + rm /tmp/tmp.h4kDOhWhPt /tmp/tmp.N09frM1a9t + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1170.yml /tmp/tmp.bg7yHYufCv/service_some-name-pxc.yml + compare_kubectl service/some-name-proxysql -1170 + local resource=service/some-name-proxysql + local postfix=-1170 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170.yml + local new_result=/tmp/tmp.bg7yHYufCv/service_some-name-proxysql.yml + desc 'compare service/some-name-proxysql--1170' + set +o xtrace ----------------------------------------------------------------------------------- compare service/some-name-proxysql--1170 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than 
desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170-aks.yml ']' + kubectl_bin get -o yaml service/some-name-proxysql ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.3jLWbADPD6 ++ mktemp + local LAST_ERR=/tmp/tmp.qcgQDpidtd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/some-name-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3jLWbADPD6 + cat /tmp/tmp.qcgQDpidtd + rm /tmp/tmp.3jLWbADPD6 /tmp/tmp.qcgQDpidtd + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1170.yml /tmp/tmp.bg7yHYufCv/service_some-name-proxysql.yml + compare_kubectl statefulset/some-name-pxc -1170 + local resource=statefulset/some-name-pxc + local postfix=-1170 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170.yml + local new_result=/tmp/tmp.bg7yHYufCv/statefulset_some-name-pxc.yml + desc 'compare statefulset/some-name-pxc--1170' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/some-name-pxc--1170 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than 
desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170-aks.yml ']' + kubectl_bin get -o yaml statefulset/some-name-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.CwW6eKEZ7J ++ mktemp + local LAST_ERR=/tmp/tmp.ZLIEzFujnK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CwW6eKEZ7J + cat /tmp/tmp.ZLIEzFujnK + rm /tmp/tmp.CwW6eKEZ7J /tmp/tmp.ZLIEzFujnK + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1170.yml /tmp/tmp.bg7yHYufCv/statefulset_some-name-pxc.yml + compare_kubectl statefulset/some-name-proxysql -1170 + local resource=statefulset/some-name-proxysql + local postfix=-1170 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170.yml + local new_result=/tmp/tmp.bg7yHYufCv/statefulset_some-name-proxysql.yml + desc 'compare statefulset/some-name-proxysql--1170' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/some-name-proxysql--1170 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.33' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
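The long chain of version_gt calls above is how the harness picks which expected file to diff against: starting from the newest variant, the first platform- or Kubernetes-version-specific compare file that actually exists (-80, -k133, -k129, ..., -oc, -eks, -aks) wins, otherwise the generic one is used. version_gt itself is just a bc comparison against the detected server minor version. A minimal sketch of that check, assuming the detected version is exported as KUBE_VERSION (that variable name is illustrative, not necessarily the harness's own):

    # Returns 0 (true) when the detected Kubernetes minor version is >= the argument,
    # mirroring the `echo '1.31 >= 1.29' | bc -l` checks in the trace above.
    version_gt() {
        local desired=$1
        [ "$(echo "${KUBE_VERSION} >= ${desired}" | bc -l)" -eq 1 ]
    }

    # Example: with KUBE_VERSION=1.31, prefer the -k129 variant over the generic file.
    KUBE_VERSION=1.31
    expected=statefulset_some-name-proxysql-1170.yml
    if version_gt 1.29 && [ -f statefulset_some-name-proxysql-1170-k129.yml ]; then
        expected=statefulset_some-name-proxysql-1170-k129.yml
    fi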
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.24' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170-aks.yml ']' + kubectl_bin get -o yaml statefulset/some-name-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.Rzz4JN2qbx ++ mktemp + local LAST_ERR=/tmp/tmp.kCfRy2f4F1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Rzz4JN2qbx + cat /tmp/tmp.kCfRy2f4F1 + rm /tmp/tmp.Rzz4JN2qbx /tmp/tmp.kCfRy2f4F1 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-proxysql-1170.yml /tmp/tmp.bg7yHYufCv/statefulset_some-name-proxysql.yml + desc 'test 1.18.0' + set +o xtrace ----------------------------------------------------------------------------------- test 1.18.0 ----------------------------------------------------------------------------------- + kubectl_bin patch pxc some-name --type=merge --patch '{ "spec": {"crVersion":"1.18.0"} }' ++ mktemp + local LAST_OUT=/tmp/tmp.BXhZMRvUr0 ++ mktemp + local LAST_ERR=/tmp/tmp.bdfWKuMwTZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc some-name --type=merge --patch '{ "spec": {"crVersion":"1.18.0"} }' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BXhZMRvUr0 perconaxtradbcluster.pxc.percona.com/some-name patched + cat /tmp/tmp.bdfWKuMwTZ + rm /tmp/tmp.BXhZMRvUr0 /tmp/tmp.bdfWKuMwTZ + return 0 + wait_cluster_consistency some-name 3 2 + local cluster_name=some-name + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/some-name to be ready' waiting for pxc/some-name to be ready++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7SjqjLoxgz +++ mktemp ++ local LAST_ERR=/tmp/tmp.bHsmEg7IKF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7SjqjLoxgz ++ cat /tmp/tmp.bHsmEg7IKF ++ rm /tmp/tmp.7SjqjLoxgz /tmp/tmp.bHsmEg7IKF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hXTtqI5cia +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Zz7A22IIW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hXTtqI5cia ++ cat /tmp/tmp.1Zz7A22IIW ++ rm /tmp/tmp.hXTtqI5cia /tmp/tmp.1Zz7A22IIW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
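After the crVersion merge patch above, wait_cluster_consistency polls the custom resource until .status.state reports ready, and then checks that .status.pxc.ready and .status.proxysql.ready match the expected sizes (3 and 2 here). The repeated blocks that follow are iterations of that poll. A condensed sketch of the same loop, with the retry/mktemp wrapping of kubectl_bin stripped out:

    # Poll pxc/<cluster> every 5 seconds until the operator reports state=ready,
    # giving up after 300 iterations (the maximum seen in the trace).
    wait_cluster_ready() {
        local cluster=$1 pxc_size=$2 proxy_size=$3 i=0 max=300
        until [ "$(kubectl get pxc "$cluster" -o jsonpath='{.status.state}')" = "ready" ]; do
            [ "$i" -ge "$max" ] && { echo "timeout waiting for pxc/$cluster" >&2; return 1; }
            echo -n .
            sleep 5
            i=$((i + 1))
        done
        # Once ready, also verify the replica counts reported in .status:
        [ "$(kubectl get pxc "$cluster" -o jsonpath='{.status.pxc.ready}')" = "$pxc_size" ] &&
            [ "$(kubectl get pxc "$cluster" -o jsonpath='{.status.proxysql.ready}')" = "$proxy_size" ]
    }

    # e.g. wait_cluster_ready some-name 3 2   # 3 PXC pods, 2 ProxySQL pods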
.+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XzixFdesY3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mox8cN23b8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XzixFdesY3 ++ cat /tmp/tmp.mox8cN23b8 ++ rm /tmp/tmp.XzixFdesY3 /tmp/tmp.mox8cN23b8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jp64ttmDYK +++ mktemp ++ local LAST_ERR=/tmp/tmp.a9UyZRa0o7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jp64ttmDYK ++ cat /tmp/tmp.a9UyZRa0o7 ++ rm /tmp/tmp.jp64ttmDYK /tmp/tmp.a9UyZRa0o7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L689SZF67z +++ mktemp ++ local LAST_ERR=/tmp/tmp.YXtoyDEAjZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.L689SZF67z ++ cat /tmp/tmp.YXtoyDEAjZ ++ rm /tmp/tmp.L689SZF67z /tmp/tmp.YXtoyDEAjZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hSwRVx6cXx +++ mktemp ++ local LAST_ERR=/tmp/tmp.IwFtsBe7eq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hSwRVx6cXx ++ cat /tmp/tmp.IwFtsBe7eq ++ rm /tmp/tmp.hSwRVx6cXx /tmp/tmp.IwFtsBe7eq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q1yyFyHSSR +++ mktemp ++ local LAST_ERR=/tmp/tmp.JBa3gG5HlX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Q1yyFyHSSR ++ cat /tmp/tmp.JBa3gG5HlX ++ rm /tmp/tmp.Q1yyFyHSSR /tmp/tmp.JBa3gG5HlX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wniBWYooIA +++ mktemp ++ local LAST_ERR=/tmp/tmp.popRDmZH8G ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wniBWYooIA ++ cat /tmp/tmp.popRDmZH8G ++ rm /tmp/tmp.wniBWYooIA /tmp/tmp.popRDmZH8G ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YqQwDbxfyt +++ mktemp ++ local LAST_ERR=/tmp/tmp.L289RwjDKk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YqQwDbxfyt ++ cat /tmp/tmp.L289RwjDKk ++ rm /tmp/tmp.YqQwDbxfyt /tmp/tmp.L289RwjDKk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DpttP8S8OR +++ mktemp ++ local LAST_ERR=/tmp/tmp.9FWEGO1ANU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DpttP8S8OR ++ cat /tmp/tmp.9FWEGO1ANU ++ rm /tmp/tmp.DpttP8S8OR /tmp/tmp.9FWEGO1ANU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nwotrKGTnk +++ mktemp ++ local LAST_ERR=/tmp/tmp.NZ6zHjJVGi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nwotrKGTnk ++ cat /tmp/tmp.NZ6zHjJVGi ++ rm /tmp/tmp.nwotrKGTnk /tmp/tmp.NZ6zHjJVGi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aiAs6JCWSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.xxONnKNcaY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aiAs6JCWSV ++ cat /tmp/tmp.xxONnKNcaY ++ rm /tmp/tmp.aiAs6JCWSV /tmp/tmp.xxONnKNcaY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JqDs3Eq7ey +++ mktemp ++ local LAST_ERR=/tmp/tmp.DppIIFTzO9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JqDs3Eq7ey ++ cat /tmp/tmp.DppIIFTzO9 ++ rm /tmp/tmp.JqDs3Eq7ey /tmp/tmp.DppIIFTzO9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YE4NtszuiB +++ mktemp ++ local LAST_ERR=/tmp/tmp.jwOcOcMknL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YE4NtszuiB ++ cat /tmp/tmp.jwOcOcMknL ++ rm /tmp/tmp.YE4NtszuiB /tmp/tmp.jwOcOcMknL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B02A2clevF +++ mktemp ++ local LAST_ERR=/tmp/tmp.IU3X7uaEvN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.B02A2clevF ++ cat /tmp/tmp.IU3X7uaEvN ++ rm /tmp/tmp.B02A2clevF /tmp/tmp.IU3X7uaEvN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SMlrY5fT1Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.lHowTbC7jx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SMlrY5fT1Q ++ cat /tmp/tmp.lHowTbC7jx ++ rm /tmp/tmp.SMlrY5fT1Q /tmp/tmp.lHowTbC7jx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GvxBrCEmRy +++ mktemp ++ local LAST_ERR=/tmp/tmp.s1H2ihD2Pi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GvxBrCEmRy ++ cat /tmp/tmp.s1H2ihD2Pi ++ rm /tmp/tmp.GvxBrCEmRy /tmp/tmp.s1H2ihD2Pi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OygT0OUKYp +++ mktemp ++ local LAST_ERR=/tmp/tmp.jRXITM0YhB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OygT0OUKYp ++ cat /tmp/tmp.jRXITM0YhB ++ rm /tmp/tmp.OygT0OUKYp /tmp/tmp.jRXITM0YhB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 17 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tkApply8sV +++ mktemp ++ local LAST_ERR=/tmp/tmp.b0cEMpxJUQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tkApply8sV ++ cat /tmp/tmp.b0cEMpxJUQ ++ rm /tmp/tmp.tkApply8sV /tmp/tmp.b0cEMpxJUQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 18 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ebn4c2jRcR +++ mktemp ++ local LAST_ERR=/tmp/tmp.1tV3M5cSCc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ebn4c2jRcR ++ cat /tmp/tmp.1tV3M5cSCc ++ rm /tmp/tmp.ebn4c2jRcR /tmp/tmp.1tV3M5cSCc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 19 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.03ld7AZsX3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.lCtazCXtm8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 ']' ++ '[' 1 == 1 ']' ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.03ld7AZsX3 ++ cat /tmp/tmp.lCtazCXtm8 ++ rm /tmp/tmp.03ld7AZsX3 /tmp/tmp.lCtazCXtm8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 20 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TJjhXcJwB4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BrN77MplPT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TJjhXcJwB4 ++ cat /tmp/tmp.BrN77MplPT ++ rm /tmp/tmp.TJjhXcJwB4 /tmp/tmp.BrN77MplPT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 21 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zjmk2qyNOa +++ mktemp ++ local LAST_ERR=/tmp/tmp.ia7pIPyphd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zjmk2qyNOa ++ cat /tmp/tmp.ia7pIPyphd ++ rm /tmp/tmp.zjmk2qyNOa /tmp/tmp.ia7pIPyphd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 22 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8gnXLAVhZE +++ mktemp ++ local LAST_ERR=/tmp/tmp.xnzJskQN9x ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8gnXLAVhZE ++ cat /tmp/tmp.xnzJskQN9x ++ rm /tmp/tmp.8gnXLAVhZE /tmp/tmp.xnzJskQN9x ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 23 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V1E9c5ZUd5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MTY1vD1sLa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.V1E9c5ZUd5 ++ cat /tmp/tmp.MTY1vD1sLa ++ rm /tmp/tmp.V1E9c5ZUd5 /tmp/tmp.MTY1vD1sLa ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 24 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j5a6JfTt37 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SYiMxI3mhP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.j5a6JfTt37 ++ cat /tmp/tmp.SYiMxI3mhP ++ rm /tmp/tmp.j5a6JfTt37 /tmp/tmp.SYiMxI3mhP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 25 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BAxgHasYQx +++ mktemp ++ local LAST_ERR=/tmp/tmp.TZrq2euqa8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BAxgHasYQx ++ cat /tmp/tmp.TZrq2euqa8 ++ rm /tmp/tmp.BAxgHasYQx /tmp/tmp.TZrq2euqa8 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8tDZzMMaxR +++ mktemp ++ local LAST_ERR=/tmp/tmp.IVqj4R9eVO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8tDZzMMaxR ++ cat /tmp/tmp.IVqj4R9eVO ++ rm /tmp/tmp.8tDZzMMaxR /tmp/tmp.IVqj4R9eVO ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine some-name +++ local cluster_name=some-name ++++ get_proxy some-name ++++ local target_cluster=some-name +++++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.0lkjVP4l6f ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.qp2GqO0Z7Y +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.0lkjVP4l6f +++++ cat /tmp/tmp.qp2GqO0Z7Y +++++ rm /tmp/tmp.0lkjVP4l6f /tmp/tmp.qp2GqO0Z7Y +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.oEnLjCFxJx ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.GjXwEjtJjy +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc some-name -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.oEnLjCFxJx +++++ cat /tmp/tmp.GjXwEjtJjy +++++ rm /tmp/tmp.oEnLjCFxJx /tmp/tmp.GjXwEjtJjy +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo some-name-proxysql ++++ return +++ local cluster_proxy=some-name-proxysql +++ echo proxysql ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mBYOLHrGSB +++ mktemp ++ local LAST_ERR=/tmp/tmp.SQQlCAkXmd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mBYOLHrGSB ++ cat /tmp/tmp.SQQlCAkXmd ++ rm /tmp/tmp.mBYOLHrGSB /tmp/tmp.SQQlCAkXmd ++ return 0 + [[ 2 == \2 ]] + echo + wait_for_sts_generation some-name-pxc 2 1 + local name=some-name-pxc + local desired_generation=2 + local size=1 + set +o xtrace Waiting for statefulset readyness + desc 'compare k8s objects' + set +o xtrace ----------------------------------------------------------------------------------- compare k8s objects ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-pxc -1180 + local resource=service/some-name-pxc + local postfix=-1180 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180.yml + local 
new_result=/tmp/tmp.bg7yHYufCv/service_some-name-pxc.yml + desc 'compare service/some-name-pxc--1180' + set +o xtrace ----------------------------------------------------------------------------------- compare service/some-name-pxc--1180 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired 
----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.21' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180-aks.yml ']' + kubectl_bin get -o yaml service/some-name-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.jSdpnNQ94z ++ mktemp + local LAST_ERR=/tmp/tmp.F68GB33s0M + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/some-name-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jSdpnNQ94z + cat /tmp/tmp.F68GB33s0M + rm /tmp/tmp.jSdpnNQ94z /tmp/tmp.F68GB33s0M + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-pxc-1180.yml /tmp/tmp.bg7yHYufCv/service_some-name-pxc.yml + compare_kubectl service/some-name-proxysql -1180 + local resource=service/some-name-proxysql + local postfix=-1180 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180.yml + local new_result=/tmp/tmp.bg7yHYufCv/service_some-name-proxysql.yml + desc 'compare service/some-name-proxysql--1180' + set +o xtrace ----------------------------------------------------------------------------------- compare service/some-name-proxysql--1180 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than 
desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.33' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.24' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180-aks.yml ']' + kubectl_bin get -o yaml service/some-name-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. 
| select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.jeTFwmNXub ++ mktemp + local LAST_ERR=/tmp/tmp.2ke7QvCn7x + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/some-name-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jeTFwmNXub + cat /tmp/tmp.2ke7QvCn7x + rm /tmp/tmp.jeTFwmNXub /tmp/tmp.2ke7QvCn7x + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/service_some-name-proxysql-1180.yml /tmp/tmp.bg7yHYufCv/service_some-name-proxysql.yml + compare_kubectl statefulset/some-name-pxc -1180 + local resource=statefulset/some-name-pxc + local postfix=-1180 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180.yml + local new_result=/tmp/tmp.bg7yHYufCv/statefulset_some-name-pxc.yml + desc 'compare statefulset/some-name-pxc--1180' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/some-name-pxc--1180 ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180-aks.yml ']' + kubectl_bin get -o yaml statefulset/some-name-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' -
+ local LAST_OUT=/tmp/tmp.eWtcMy0po2
++ mktemp
+ local LAST_ERR=/tmp/tmp.OxlCuzTN7o
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-pxc
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.eWtcMy0po2
+ cat /tmp/tmp.OxlCuzTN7o
+ rm /tmp/tmp.eWtcMy0po2 /tmp/tmp.OxlCuzTN7o
+ return 0
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180.yml /tmp/tmp.bg7yHYufCv/statefulset_some-name-pxc.yml
--- /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-consistency/compare/statefulset_some-name-pxc-1180.yml 2025-11-04 07:54:10.976958718 +0000
+++ /tmp/tmp.bg7yHYufCv/statefulset_some-name-pxc.yml 2025-11-04 09:52:01.796946650 +0000
@@ -129,6 +129,8 @@
           value: /var/lib/mysql/notify.sock
         - name: MYSQL_STATE_FILE
           value: /var/lib/mysql/mysql.state
+        - name: LD_PRELOAD
+          value: /usr/lib64/libjemalloc.so.1
         envFrom:
         - secretRef:
             name: some-name-env-vars-pxc
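
The diff above is the actual finding of this compare step: after normalization, the live statefulset/some-name-pxc carries an extra LD_PRELOAD=/usr/lib64/libjemalloc.so.1 environment variable on its PXC container that the expected file statefulset_some-name-pxc-1180.yml does not list, so diff -u reports a difference and the upgrade-consistency check fails here. Either the expected compare file is stale (if preloading jemalloc is an intended change in this operator build) or the operator is injecting an environment variable it should not; the log alone cannot tell which. A quick way to confirm what the live object contains is to query the env entry directly; this is a sketch, assuming the container in the statefulset is named pxc (not verifiable from this log):

    kubectl get statefulset/some-name-pxc -o yaml \
        | yq eval '.spec.template.spec.containers[]
            | select(.name == "pxc")
            | .env[]
            | select(.name == "LD_PRELOAD")' -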
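
For context, the long yq expression repeated throughout this trace is the normalization filter applied before each comparison: it deletes fields that legitimately differ between runs (managedFields, uid, resourceVersion, status, generated percona.com/* hashes, node ports, the test namespace, and so on) so that only meaningful drift shows up in the diff against the expected YAML under e2e-tests/upgrade-consistency/compare/. The following is a heavily trimmed sketch of that flow, keeping only a few of the del() rules shown above; compare_kubectl_sketch, test_dir and tmp_dir are placeholder names, not the helper this suite actually calls:

    compare_kubectl_sketch() {
        local resource="$1"     # e.g. statefulset/some-name-pxc
        local postfix="$2"      # e.g. -1180
        local expected="${test_dir}/compare/$(echo "$resource" | tr '/' '_')${postfix}.yml"
        local actual="${tmp_dir}/$(echo "$resource" | tr '/' '_').yml"

        # fetch the live object and drop run-specific fields (small subset of the real filter)
        kubectl get -o yaml "$resource" \
            | yq eval '
                del(.metadata.managedFields) |
                del(.. | select(has("creationTimestamp")).creationTimestamp) |
                del(.. | select(has("uid")).uid) |
                del(.metadata.resourceVersion) |
                del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
                del(.status) |
                (.. | select(tag == "!!str")) |= sub("upgrade-consistency-25326", "namespace")
            ' - > "$actual"

        # a non-zero exit status here is what fails the test
        diff -u "$expected" "$actual"
    }

In this run the normalized objects land under /tmp/tmp.bg7yHYufCv/, and the expected file may be swapped for a platform- or Kubernetes-version-specific variant, which is what the repeated file-existence tests in the trace are doing (see the next sketch).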
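
The repeated 'return true if kubernetes version equal or greater than desired' banners come from a version_gt helper that compares the server's major.minor version against a threshold with bc, and the surrounding '[ -f ...-k129.yml ]'-style tests pick a suffixed variant of the expected file when one exists (-80 or -84 for the PXC image line, -k121 through -k129 for Kubernetes versions, -oc, -eks and -aks for platforms). A minimal sketch of both pieces, assuming the server version is available as KUBE_VERSION (1.31 in this run); the real helpers may obtain and name these values differently:

    version_gt() {
        # succeeds when KUBE_VERSION >= the requested version, e.g. version_gt 1.29
        if [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]; then
            return 0
        fi
        return 1
    }

    # expected-file variant selection, as traced above for the -k129 files
    if version_gt 1.29 && [ -f "${expected%.yml}-k129.yml" ]; then
        expected="${expected%.yml}-k129.yml"
    fi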