Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/logs/affinity-8-0.log WARNING: version difference between client (1.32) and server (1.28) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.32) and server (1.28) exceeds the supported minor version skew of +/-1 + main + create_infra affinity-9648 + local ns=affinity-9648 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n affinity-30663 custom --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/custom patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.9gCXjOzCYV ++ mktemp + local LAST_ERR=/tmp/tmp.ZIJRdBgjS5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9gCXjOzCYV perconaxtradbcluster.pxc.percona.com "custom" deleted + cat /tmp/tmp.ZIJRdBgjS5 + rm /tmp/tmp.9gCXjOzCYV /tmp/tmp.ZIJRdBgjS5 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.DZebuA67Nf ++ mktemp + local LAST_ERR=/tmp/tmp.2ujjwC5JpL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DZebuA67Nf No resources found + cat /tmp/tmp.2ujjwC5JpL + rm /tmp/tmp.DZebuA67Nf /tmp/tmp.2ujjwC5JpL + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.BI7hmZHmD5 ++ mktemp + local LAST_ERR=/tmp/tmp.IhL3pg2Keg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BI7hmZHmD5 No resources found + cat /tmp/tmp.IhL3pg2Keg + rm /tmp/tmp.BI7hmZHmD5 /tmp/tmp.IhL3pg2Keg + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but 
no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.OpALF4yIyN + local LAST_OUT=/tmp/tmp.fCG9zdZ8OX ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.TmK50x6rO4 + local exit_status=0 + local LAST_ERR=/tmp/tmp.YSgrR0qSyE + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fCG9zdZ8OX + cat /tmp/tmp.YSgrR0qSyE + rm /tmp/tmp.fCG9zdZ8OX /tmp/tmp.YSgrR0qSyE + return 0 namespace "affinity-30663" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OpALF4yIyN namespace "pxc-operator" deleted + cat /tmp/tmp.TmK50x6rO4 + rm /tmp/tmp.OpALF4yIyN /tmp/tmp.TmK50x6rO4 + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.W9Lg09UMFS ++ mktemp + local LAST_ERR=/tmp/tmp.HM8miixvWl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.W9Lg09UMFS namespace/pxc-operator created + cat /tmp/tmp.HM8miixvWl + rm /tmp/tmp.W9Lg09UMFS /tmp/tmp.HM8miixvWl + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rZ4b5Zcg57 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nlcp3GHydU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rZ4b5Zcg57 ++ cat /tmp/tmp.Nlcp3GHydU ++ rm /tmp/tmp.rZ4b5Zcg57 /tmp/tmp.Nlcp3GHydU ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WsBqsGFYOy ++ mktemp + local LAST_ERR=/tmp/tmp.P88ahkWNSD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat 
/tmp/tmp.WsBqsGFYOy Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1" modified. + cat /tmp/tmp.P88ahkWNSD + rm /tmp/tmp.WsBqsGFYOy /tmp/tmp.P88ahkWNSD + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.D0AMJtNWpL ++ mktemp + local LAST_ERR=/tmp/tmp.xzhFpxrwKP + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D0AMJtNWpL customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.xzhFpxrwKP + rm /tmp/tmp.D0AMJtNWpL /tmp/tmp.xzhFpxrwKP + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yJcqJMESVA ++ mktemp + local LAST_ERR=/tmp/tmp.ypsxO1eGDe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yJcqJMESVA clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.ypsxO1eGDe + rm /tmp/tmp.yJcqJMESVA /tmp/tmp.ypsxO1eGDe + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1916-e6a59489^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.c6YCM17rbT ++ mktemp + local LAST_ERR=/tmp/tmp.RKaJ8wZG9V + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c6YCM17rbT deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.RKaJ8wZG9V + rm /tmp/tmp.c6YCM17rbT /tmp/tmp.RKaJ8wZG9V + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.7M2KqYG2ug ++ mktemp + local LAST_ERR=/tmp/tmp.LmlmxwXqSX + 
local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7M2KqYG2ug pod/percona-xtradb-cluster-operator-646c44b8c8-c5j6j condition met + cat /tmp/tmp.LmlmxwXqSX + rm /tmp/tmp.7M2KqYG2ug /tmp/tmp.LmlmxwXqSX + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.9bgn4dwBL4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NeeOCrErRj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9bgn4dwBL4 ++ cat /tmp/tmp.NeeOCrErRj ++ rm /tmp/tmp.9bgn4dwBL4 /tmp/tmp.NeeOCrErRj ++ return 0 + wait_pod percona-xtradb-cluster-operator-646c44b8c8-c5j6j 480 pxc-operator + local pod=percona-xtradb-cluster-operator-646c44b8c8-c5j6j + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-646c44b8c8-c5j6j ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-646c44b8c8-c5j6j condition met waiting for pod/percona-xtradb-cluster-operator-646c44b8c8-c5j6j to become Ready.Ok + sleep 3 + create_namespace affinity-9648 + local namespace=affinity-9648 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces affinity-9648' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces affinity-9648 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace affinity-9648 ++ mktemp ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.7aS7pqKMH1 ++ mktemp + local LAST_OUT=/tmp/tmp.1AOedO4Elf ++ mktemp + local LAST_ERR=/tmp/tmp.15ZEuYJAHO + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.dL0npfMasR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace affinity-9648 + for i in '$(seq 0 2)' + set +e + kubectl get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + xargs kubectl delete ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1AOedO4Elf + cat /tmp/tmp.dL0npfMasR + rm /tmp/tmp.1AOedO4Elf /tmp/tmp.dL0npfMasR + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace affinity-9648 namespace "gke-managed-system" deleted namespace "gmp-public" deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace affinity-9648 namespace "gmp-system" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.7aS7pqKMH1 + cat /tmp/tmp.15ZEuYJAHO Error from server (NotFound): namespaces "affinity-9648" not found + rm /tmp/tmp.7aS7pqKMH1 /tmp/tmp.15ZEuYJAHO + return 1 + : + wait_for_delete namespace/affinity-9648 + local res=namespace/affinity-9648 + echo -n 'waiting for namespace/affinity-9648 to be deleted' waiting for namespace/affinity-9648 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "affinity-9648" not found + desc 'create namespace affinity-9648' + set +o xtrace ----------------------------------------------------------------------------------- create namespace affinity-9648 ----------------------------------------------------------------------------------- + kubectl_bin create namespace affinity-9648 ++ mktemp + local LAST_OUT=/tmp/tmp.DyR5jSxOe7 ++ mktemp + local LAST_ERR=/tmp/tmp.hQjRL61xhr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace affinity-9648 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DyR5jSxOe7 namespace/affinity-9648 created + cat /tmp/tmp.hQjRL61xhr + rm /tmp/tmp.DyR5jSxOe7 /tmp/tmp.hQjRL61xhr + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.aGn6AuPpcM +++ mktemp ++ local LAST_ERR=/tmp/tmp.FPpGyNrGfM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aGn6AuPpcM ++ cat /tmp/tmp.FPpGyNrGfM ++ rm /tmp/tmp.aGn6AuPpcM /tmp/tmp.FPpGyNrGfM ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1 --namespace=affinity-9648 ++ mktemp + local LAST_OUT=/tmp/tmp.w1Grbp5KYl ++ mktemp + local LAST_ERR=/tmp/tmp.z3uCZwpmLJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1 --namespace=affinity-9648 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.w1Grbp5KYl Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1916-e6a59489-8-cluster1" modified. + cat /tmp/tmp.z3uCZwpmLJ + rm /tmp/tmp.w1Grbp5KYl /tmp/tmp.z3uCZwpmLJ + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qywHnnpZbL ++ mktemp + local LAST_ERR=/tmp/tmp.LKMaqXzb0p + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qywHnnpZbL secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.LKMaqXzb0p + rm /tmp/tmp.qywHnnpZbL /tmp/tmp.LKMaqXzb0p + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.29rXprPe4t ++ mktemp + local LAST_ERR=/tmp/tmp.qMd1m9yCu9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.29rXprPe4t secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.qMd1m9yCu9 + rm /tmp/tmp.29rXprPe4t /tmp/tmp.qMd1m9yCu9 + return 0 + desc 'check hostname anti-affinity' + set +o xtrace ----------------------------------------------------------------------------------- check hostname anti-affinity ----------------------------------------------------------------------------------- + check_affinity hostname + local cluster=hostname + local skip_wait= + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/hostname.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/hostname.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/hostname.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.c1n5bmCpJX + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1916-e6a59489#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 
's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.affinity-9648~ + local LAST_ERR=/tmp/tmp.GcaWbCBktE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c1n5bmCpJX perconaxtradbcluster.pxc.percona.com/hostname created + cat /tmp/tmp.GcaWbCBktE + rm /tmp/tmp.c1n5bmCpJX /tmp/tmp.GcaWbCBktE + return 0 + desc 'check if at least 1 Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if at least 1 Pod started ----------------------------------------------------------------------------------- + '[' -z '' ']' + wait_for_running hostname-pxc 1 + local name=hostname-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod hostname-pxc-0 480 + local pod=hostname-pxc-0 + local max_retry=480 + local ns= ++ echo hostname-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/hostname-pxc-0 condition met waiting for pod/hostname-pxc-0 to become Ready.Ok + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/hostname-pxc + local resource=statefulset/hostname-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_hostname-pxc.yml + desc 'compare statefulset/hostname-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/hostname-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or 
greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/hostname-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.6k7n7xWdfZ ++ mktemp + local LAST_ERR=/tmp/tmp.ZSwRDCnbB3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/hostname-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6k7n7xWdfZ + cat /tmp/tmp.ZSwRDCnbB3 + rm /tmp/tmp.6k7n7xWdfZ /tmp/tmp.ZSwRDCnbB3 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-pxc-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_hostname-pxc.yml + compare_kubectl statefulset/hostname-proxysql + local resource=statefulset/hostname-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_hostname-proxysql.yml + desc 'compare statefulset/hostname-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/hostname-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + 
version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/hostname-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.4jmHkdYwst ++ mktemp + local LAST_ERR=/tmp/tmp.hrz9EHeVng + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/hostname-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4jmHkdYwst + cat /tmp/tmp.hrz9EHeVng + rm /tmp/tmp.4jmHkdYwst /tmp/tmp.hrz9EHeVng + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_hostname-proxysql-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_hostname-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/hostname.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DQzxcxb7QI ++ mktemp + local LAST_ERR=/tmp/tmp.1JCwy5hobY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/hostname.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DQzxcxb7QI perconaxtradbcluster.pxc.percona.com "hostname" deleted + cat /tmp/tmp.1JCwy5hobY + rm /tmp/tmp.DQzxcxb7QI /tmp/tmp.1JCwy5hobY + return 0 + desc 'check zone anti-affinity' + set +o xtrace ----------------------------------------------------------------------------------- check zone anti-affinity ----------------------------------------------------------------------------------- + check_affinity zone + local cluster=zone + local skip_wait= + desc 'apply cr config' + set +o xtrace 
----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/zone.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/zone.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.sZwyk5rLVI + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/zone.yml + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.jFP3Gujc5K + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.affinity-9648~ + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1916-e6a59489#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sZwyk5rLVI perconaxtradbcluster.pxc.percona.com/zone created + cat /tmp/tmp.jFP3Gujc5K + rm /tmp/tmp.sZwyk5rLVI /tmp/tmp.jFP3Gujc5K + return 0 + desc 'check if at least 1 Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if at least 1 Pod started ----------------------------------------------------------------------------------- + '[' -z '' ']' + wait_for_running zone-pxc 1 + local name=zone-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod zone-pxc-0 480 + local pod=zone-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo zone-pxc-0 + local container=pxc + set +o xtrace pod/zone-pxc-0 condition met waiting for pod/zone-pxc-0 to become Ready.Ok + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/zone-pxc + local resource=statefulset/zone-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_zone-pxc.yml + desc 'compare statefulset/zone-pxc-' + set +o xtrace 
----------------------------------------------------------------------------------- compare statefulset/zone-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. 
| select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/zone-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.nE93mHdfe8 ++ mktemp + local LAST_ERR=/tmp/tmp.VjeDHKzA6K + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/zone-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nE93mHdfe8 + cat /tmp/tmp.VjeDHKzA6K + rm /tmp/tmp.nE93mHdfe8 /tmp/tmp.VjeDHKzA6K + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-pxc-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_zone-pxc.yml + compare_kubectl statefulset/zone-proxysql + local resource=statefulset/zone-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_zone-proxysql.yml + desc 'compare statefulset/zone-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/zone-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. 
| select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. 
| select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/zone-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.qeMltdVFsC ++ mktemp + local LAST_ERR=/tmp/tmp.cFVlDQYyog + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/zone-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qeMltdVFsC + cat /tmp/tmp.cFVlDQYyog + rm /tmp/tmp.qeMltdVFsC /tmp/tmp.cFVlDQYyog + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_zone-proxysql-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_zone-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/zone.yml ++ mktemp + local LAST_OUT=/tmp/tmp.D7Izmlo6lv ++ mktemp + local LAST_ERR=/tmp/tmp.OGg1QnsuhU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/zone.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D7Izmlo6lv perconaxtradbcluster.pxc.percona.com "zone" deleted + cat /tmp/tmp.OGg1QnsuhU + rm /tmp/tmp.D7Izmlo6lv /tmp/tmp.OGg1QnsuhU + return 0 + desc 'check region anti-affinity' + set +o xtrace ----------------------------------------------------------------------------------- check region anti-affinity ----------------------------------------------------------------------------------- + check_affinity region + local cluster=region + local skip_wait= + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/region.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/region.yml + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1916-e6a59489#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.affinity-9648~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_OUT=/tmp/tmp.0mLnzEboVG + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + local LAST_ERR=/tmp/tmp.LFl852YE1P + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/region.yml 
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0mLnzEboVG perconaxtradbcluster.pxc.percona.com/region created + cat /tmp/tmp.LFl852YE1P + rm /tmp/tmp.0mLnzEboVG /tmp/tmp.LFl852YE1P + return 0 + desc 'check if at least 1 Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if at least 1 Pod started ----------------------------------------------------------------------------------- + '[' -z '' ']' + wait_for_running region-pxc 1 + local name=region-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod region-pxc-0 480 + local pod=region-pxc-0 + local max_retry=480 + local ns= ++ echo region-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/region-pxc-0 condition met waiting for pod/region-pxc-0 to become Ready.Ok + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/region-pxc + local resource=statefulset/region-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_region-pxc.yml + desc 'compare statefulset/region-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/region-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-k127.yml ']' + 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/region-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.m1qylZpNwy ++ mktemp + local LAST_ERR=/tmp/tmp.pePEuFoMGL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/region-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.m1qylZpNwy + cat /tmp/tmp.pePEuFoMGL + rm /tmp/tmp.m1qylZpNwy /tmp/tmp.pePEuFoMGL + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-pxc-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_region-pxc.yml + compare_kubectl statefulset/region-proxysql + local resource=statefulset/region-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_region-proxysql.yml + desc 'compare statefulset/region-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/region-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/region-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.QBMSVpuQ5J ++ mktemp + local LAST_ERR=/tmp/tmp.mlfRKUWdw3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/region-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QBMSVpuQ5J + cat /tmp/tmp.mlfRKUWdw3 + rm /tmp/tmp.QBMSVpuQ5J /tmp/tmp.mlfRKUWdw3 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_region-proxysql-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_region-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/region.yml ++ mktemp + local LAST_OUT=/tmp/tmp.5DsAekyokc ++ mktemp + local LAST_ERR=/tmp/tmp.tEJeyrkCiY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/region.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5DsAekyokc perconaxtradbcluster.pxc.percona.com "region" deleted + cat /tmp/tmp.tEJeyrkCiY + rm /tmp/tmp.5DsAekyokc /tmp/tmp.tEJeyrkCiY + return 0 + desc 'check custom anti-affinity' + set +o xtrace ----------------------------------------------------------------------------------- check custom anti-affinity ----------------------------------------------------------------------------------- + check_affinity custom skip_wait + local cluster=custom + local skip_wait=skip_wait + desc 'apply cr config' + set +o xtrace ----------------------------------------------------------------------------------- apply cr config ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/custom.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/custom.yml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.affinity-9648~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + kubectl_bin apply -f - + /usr/bin/sed -e 
's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1916-e6a59489#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/custom.yml + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + local LAST_OUT=/tmp/tmp.3H3XYQCdDU ++ mktemp + local LAST_ERR=/tmp/tmp.09c6LZAyfQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3H3XYQCdDU perconaxtradbcluster.pxc.percona.com/custom created + cat /tmp/tmp.09c6LZAyfQ + rm /tmp/tmp.3H3XYQCdDU /tmp/tmp.09c6LZAyfQ + return 0 + desc 'check if at least 1 Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if at least 1 Pod started ----------------------------------------------------------------------------------- + '[' -z skip_wait ']' + sleep 5 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/custom-pxc + local resource=statefulset/custom-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_custom-pxc.yml + desc 'compare statefulset/custom-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/custom-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes 
version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/custom-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.MvT3eCNqMl ++ mktemp + local LAST_ERR=/tmp/tmp.zt2YOw5mq3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/custom-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MvT3eCNqMl + cat /tmp/tmp.zt2YOw5mq3 + rm /tmp/tmp.MvT3eCNqMl /tmp/tmp.zt2YOw5mq3 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-pxc-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_custom-pxc.yml + compare_kubectl statefulset/custom-proxysql + local resource=statefulset/custom-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql.yml + local new_result=/tmp/tmp.i9Z95AxbJY/statefulset_custom-proxysql.yml + desc 'compare statefulset/custom-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/custom-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.28 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.28 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/custom-proxysql + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("affinity-9648", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.Zy7yNz6r51 ++ mktemp + local LAST_ERR=/tmp/tmp.Z81ELEb9fU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/custom-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Zy7yNz6r51 + cat /tmp/tmp.Z81ELEb9fU + rm /tmp/tmp.Zy7yNz6r51 /tmp/tmp.Z81ELEb9fU + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/compare/statefulset_custom-proxysql-k127.yml /tmp/tmp.i9Z95AxbJY/statefulset_custom-proxysql.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/custom.yml ++ mktemp + local LAST_OUT=/tmp/tmp.PuPYj2Jm0w ++ mktemp + local LAST_ERR=/tmp/tmp.No2zRjlayr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1916/e2e-tests/affinity/conf/custom.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PuPYj2Jm0w perconaxtradbcluster.pxc.percona.com "custom" deleted + cat /tmp/tmp.No2zRjlayr + rm /tmp/tmp.PuPYj2Jm0w /tmp/tmp.No2zRjlayr + return 0 + destroy affinity-9648 + local namespace=affinity-9648 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v 'the object has been modified' + grep -v level=info + tee /tmp/tmp.i9Z95AxbJY/operator.log + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u ++ 
get_operator_pod ++ local label_prefix=app.kubernetes.io/ + grep -v 'get backup status: Job.batch' +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZPzICGfIkk +++ mktemp ++ local LAST_ERR=/tmp/tmp.WuqR8LPmSS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZPzICGfIkk ++ cat /tmp/tmp.WuqR8LPmSS ++ rm /tmp/tmp.ZPzICGfIkk /tmp/tmp.WuqR8LPmSS ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-646c44b8c8-c5j6j ++ mktemp + local LAST_OUT=/tmp/tmp.zVtG6QsIjc ++ mktemp + local LAST_ERR=/tmp/tmp.IOi2fdA8n3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-646c44b8c8-c5j6j + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zVtG6QsIjc + cat /tmp/tmp.IOi2fdA8n3 + rm /tmp/tmp.zVtG6QsIjc /tmp/tmp.IOi2fdA8n3 + return 0 2025-01-13T16:55:53.290Z INFO setup Manager starting up {"gitCommit": "e6a594899dc6745ae7c945936c58e46986629cde", "gitBranch": "PR-1916-e6a59489", "buildTime": "2025-01-13T16:38:10Z", "goVersion": "go1.23.4", "os": "linux", "arch": "amd64"} 2025-01-13T16:55:53.290Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.28.15-gke.1503000"} 2025-01-13T16:55:53.292Z INFO setup Registering Components. 2025-01-13T16:55:56.786Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-01-13T16:55:56.789Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-01-13T16:55:56.789Z INFO controller-runtime.metrics Starting metrics server 2025-01-13T16:55:56.789Z INFO setup Starting the Cmd. 2025-01-13T16:55:56.789Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-01-13T16:55:56.790Z INFO controller-runtime.webhook Starting webhook server 2025-01-13T16:55:56.869Z INFO controller-runtime.certwatcher Updated current TLS certificate 2025-01-13T16:55:56.870Z INFO controller-runtime.certwatcher Starting certificate watcher 2025-01-13T16:55:56.870Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-01-13T16:55:56.990Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 
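
The trace just above shows how the destroy step finds the operator pod by its app.kubernetes.io/name label and saves a de-noised copy of its log (the dump that continues below). Roughly, and with an illustrative output path instead of the suite's mktemp directory:

# Sketch, not the suite's exact helper: locate the operator pod and keep a
# filtered copy of its log. /tmp/operator.log is an illustrative path.
ns=pxc-operator
pod=$(kubectl get pods \
    --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
    -o 'jsonpath={.items[].metadata.name}' -n "$ns")

kubectl logs -n "$ns" "$pod" \
    | grep -v level=info \
    | grep -v 'the object has been modified' \
    | grep -v 'get backup status: Job.batch' \
    | sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
    | sort -u \
    | tee /tmp/operator.log
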
2025-01-13T16:55:57.001Z DEBUG events percona-xtradb-cluster-operator-646c44b8c8-c5j6j_21981e1d-e6da-4414-93c1-9c5cd14f244c became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"4f97951a-566f-464e-8e0e-cab16abe70b5","apiVersion":"coordination.k8s.io/v1","resourceVersion":"6823"}, "reason": "LeaderElection"} 2025-01-13T16:55:57.001Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-01-13T16:55:57.002Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2025-01-13T16:55:57.002Z INFO Starting Controller {"controller": "pxc-controller"} 2025-01-13T16:55:57.002Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2025-01-13T16:55:57.002Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-01-13T16:55:57.002Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-01-13T16:55:57.002Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-01-13T16:55:57.211Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2025-01-13T16:55:57.211Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2025-01-13T16:55:57.211Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2025-01-13T16:56:27.497Z INFO Set CR version {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "version": "1.17.0"} 2025-01-13T16:56:27.979Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-pxc"} 2025-01-13T16:56:27.997Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-proxysql"} 2025-01-13T16:56:28.152Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-proxysql"} 2025-01-13T16:56:28.174Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-pxc"} 2025-01-13T16:56:28.256Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-pxc-unready"} 2025-01-13T16:56:28.454Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-proxysql"} 2025-01-13T16:56:28.552Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "324dd44d-4f24-43aa-b4ad-c219a06019e7", "object": "hostname-proxysql-unready"} 2025-01-13T16:56:28.727Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "d2613c06-360d-4496-84fc-d834d0cd3bb3", "object": "hostname-pxc"} 2025-01-13T16:57:45.042Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", 
"reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1", "user": "operator"} 2025-01-13T16:57:45.109Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1", "user": "monitor"} 2025-01-13T16:57:45.238Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1"} 2025-01-13T16:57:45.300Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1"} 2025-01-13T16:57:45.425Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1", "user": "xtrabackup"} 2025-01-13T16:57:45.825Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1"} 2025-01-13T16:57:45.925Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1", "user": "replication"} 2025-01-13T16:57:45.945Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "hostname", "reconcileID": "c7e07f1d-f5ba-4b0a-a111-686a41fc1ca1", "err": "get primary pxc pod: not found"} 2025-01-13T16:57:49.559Z INFO Set CR version {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "version": "1.17.0"} 2025-01-13T16:57:49.736Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-pxc"} 2025-01-13T16:57:49.775Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-pxc"} 2025-01-13T16:57:49.807Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-proxysql"} 2025-01-13T16:57:49.909Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-proxysql"} 2025-01-13T16:57:49.938Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-pxc"} 2025-01-13T16:57:50.033Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-pxc-unready"} 2025-01-13T16:57:50.137Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-proxysql"} 2025-01-13T16:57:50.350Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "21110da0-b66f-4595-9d8e-de51be965cc7", "object": "zone-proxysql-unready"} 2025-01-13T16:59:06.288Z INFO Password expiration policy updated {"controller": "pxc-controller", 
"namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26", "user": "operator"} 2025-01-13T16:59:06.316Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26", "user": "monitor"} 2025-01-13T16:59:06.374Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26"} 2025-01-13T16:59:06.405Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26"} 2025-01-13T16:59:06.443Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26", "user": "xtrabackup"} 2025-01-13T16:59:06.489Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26"} 2025-01-13T16:59:06.530Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26", "user": "replication"} 2025-01-13T16:59:06.581Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "zone", "reconcileID": "edc8fad6-32ec-4d09-863b-4c3975f86a26", "err": "get primary pxc pod: not found"} 2025-01-13T16:59:11.664Z INFO Set CR version {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "version": "1.17.0"} 2025-01-13T16:59:11.742Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-pxc"} 2025-01-13T16:59:11.760Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-pxc"} 2025-01-13T16:59:11.793Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-proxysql"} 2025-01-13T16:59:11.842Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-pxc"} 2025-01-13T16:59:11.907Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-pxc-unready"} 2025-01-13T16:59:11.977Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-proxysql"} 2025-01-13T16:59:12.119Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "9d523a98-e843-4430-b50f-4455c064a0d0", "object": "region-proxysql-unready"} 2025-01-13T16:59:12.833Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "0fcaa36a-912f-42e8-a83e-5f62ebb1213a", "object": "region-proxysql"} 2025-01-13T17:00:28.501Z INFO Password expiration 
policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7", "user": "operator"} 2025-01-13T17:00:28.533Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7", "user": "monitor"} 2025-01-13T17:00:28.598Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7"} 2025-01-13T17:00:28.633Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7"} 2025-01-13T17:00:28.660Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7", "user": "xtrabackup"} 2025-01-13T17:00:28.705Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7"} 2025-01-13T17:00:28.734Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7", "user": "replication"} 2025-01-13T17:00:28.751Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "region", "reconcileID": "7a72d32b-ec97-40e3-aa04-1fb3bafeeca7", "err": "get primary pxc pod: not found"} 2025-01-13T17:00:36.563Z INFO Set CR version {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "version": "1.17.0"} 2025-01-13T17:00:36.647Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-pxc"} 2025-01-13T17:00:36.685Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-pxc"} 2025-01-13T17:00:36.722Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-proxysql"} 2025-01-13T17:00:36.785Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-proxysql"} 2025-01-13T17:00:36.819Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-pxc"} 2025-01-13T17:00:36.905Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-pxc-unready"} 2025-01-13T17:00:36.984Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": "custom-proxysql"} 2025-01-13T17:00:37.124Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "affinity-9648", "name": "custom", "reconcileID": "a80b0bab-5efc-448e-8e88-8870302cb476", "object": 
"custom-proxysql-unready"} + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.l9nRY0z7BM ++ mktemp + local LAST_ERR=/tmp/tmp.6v0ZCXZJmF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.l9nRY0z7BM No resources found + cat /tmp/tmp.6v0ZCXZJmF + rm /tmp/tmp.l9nRY0z7BM /tmp/tmp.6v0ZCXZJmF + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.hrm8RvdHEH ++ mktemp + local LAST_ERR=/tmp/tmp.wza3vJPZiD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hrm8RvdHEH No resources found + cat /tmp/tmp.wza3vJPZiD + rm /tmp/tmp.hrm8RvdHEH /tmp/tmp.wza3vJPZiD + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.XMCcUyWb2C ++ mktemp + local LAST_ERR=/tmp/tmp.zesb1R31no + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XMCcUyWb2C No resources found + cat /tmp/tmp.zesb1R31no + rm /tmp/tmp.XMCcUyWb2C /tmp/tmp.zesb1R31no + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.bJT7Omm4AO ++ mktemp + local LAST_ERR=/tmp/tmp.wyknMSXmUd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bJT7Omm4AO validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.wyknMSXmUd + rm /tmp/tmp.bJT7Omm4AO /tmp/tmp.wyknMSXmUd + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.16.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace affinity-9648 + rm -rf /tmp/tmp.i9Z95AxbJY + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.vV0qc4BZc6 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.EQdoSUdSfL + local LAST_ERR=/tmp/tmp.RSVGwFZYnh + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.DpLOPIsnU1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace affinity-9648 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator