Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/logs/cross-site-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + main + local source_cluster=cross-site-source + local replica_cluster=cross-site-replica + desc 'Create source cluster' + set +o xtrace ----------------------------------------------------------------------------------- Create source cluster ----------------------------------------------------------------------------------- + create_infra cross-site-2676 + local ns=cross-site-2676 + '[' -n pxc-operator ']' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get pxc --all-namespaces -o wide + kubectl patch pxc -n cross-site-22001 cross-site-source --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/cross-site-source patched + kubectl patch pxc -n cross-site-replica-31822 cross-site-replica --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/cross-site-replica patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.pBa0eJrkyX ++ mktemp + local LAST_ERR=/tmp/tmp.Nfg4Dkz8nC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pBa0eJrkyX perconaxtradbcluster.pxc.percona.com "cross-site-source" deleted from cross-site-22001 namespace perconaxtradbcluster.pxc.percona.com "cross-site-replica" deleted from cross-site-replica-31822 namespace + cat /tmp/tmp.Nfg4Dkz8nC + rm /tmp/tmp.pBa0eJrkyX /tmp/tmp.Nfg4Dkz8nC + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.QnxnR5RNTe ++ mktemp + local LAST_ERR=/tmp/tmp.WIcxNvANwh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QnxnR5RNTe perconaxtradbclusterbackup.pxc.percona.com "backup-minio-source" deleted from cross-site-22001 namespace + cat /tmp/tmp.WIcxNvANwh + rm /tmp/tmp.QnxnR5RNTe /tmp/tmp.WIcxNvANwh + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.IWiolHlWtZ ++ mktemp + local LAST_ERR=/tmp/tmp.9Ut0sSuPc4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IWiolHlWtZ perconaxtradbclusterrestore.pxc.percona.com "backup-minio" deleted from cross-site-replica-31822 namespace + cat /tmp/tmp.9Ut0sSuPc4 + rm /tmp/tmp.IWiolHlWtZ /tmp/tmp.9Ut0sSuPc4 + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep 
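# Note (editorial sketch, not output of the test run): create_infra above first strips finalizers
# from any leftover PerconaXtraDBCluster objects so the bulk delete cannot hang on finalizer
# processing, then removes all pxc, pxc-backup and pxc-restore resources. Condensed from the trace:
  kubectl get pxc --all-namespaces -o wide | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
  kubectl delete pxc --all --all-namespaces
  kubectl delete pxc-backup --all --all-namespaces
  kubectl delete pxc-restore --all --all-namespaces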
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.wQBOg2Su15 ++ mktemp + local LAST_ERR=/tmp/tmp.wkDd4XbqDo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.tl6OYkKaYT + awk '{print$1}' ++ mktemp + local LAST_ERR=/tmp/tmp.kP4i21OMsq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wQBOg2Su15 + cat /tmp/tmp.wkDd4XbqDo + rm /tmp/tmp.wQBOg2Su15 /tmp/tmp.wkDd4XbqDo + return 0 namespace "cross-site-22001" deleted namespace "cross-site-replica-31822" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tl6OYkKaYT namespace "pxc-operator" deleted + cat /tmp/tmp.kP4i21OMsq + rm /tmp/tmp.tl6OYkKaYT /tmp/tmp.kP4i21OMsq + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EzbWCGuKkc ++ mktemp + local LAST_ERR=/tmp/tmp.5qcu19Lw37 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EzbWCGuKkc namespace/pxc-operator created + cat /tmp/tmp.5qcu19Lw37 + rm /tmp/tmp.EzbWCGuKkc /tmp/tmp.5qcu19Lw37 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rue4sIaO1c +++ mktemp ++ local 
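# Note (editorial sketch): before recreating its namespaces the test deletes every namespace left
# over from earlier runs, keeping only system namespaces (kube-*, default, gke-*, gmp-*, openshift*),
# anything already Terminating, and pxc-operator itself, which is deleted and recreated separately:
  kubectl get ns | egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' \
    | awk '{print $1}' | xargs kubectl delete ns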
LAST_ERR=/tmp/tmp.kGMBBbJVJg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rue4sIaO1c ++ cat /tmp/tmp.kGMBBbJVJg ++ rm /tmp/tmp.rue4sIaO1c /tmp/tmp.kGMBBbJVJg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qzJRSXMkFT ++ mktemp + local LAST_ERR=/tmp/tmp.2iSfQZjCZe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qzJRSXMkFT Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2" modified. + cat /tmp/tmp.2iSfQZjCZe + rm /tmp/tmp.qzJRSXMkFT /tmp/tmp.2iSfQZjCZe + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jY0gntes0h ++ mktemp + local LAST_ERR=/tmp/tmp.PR9Hw1XtJf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jY0gntes0h customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.PR9Hw1XtJf + rm /tmp/tmp.jY0gntes0h /tmp/tmp.PR9Hw1XtJf + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.QKuc56Ee5z ++ mktemp + local LAST_ERR=/tmp/tmp.GqmPRoCqlA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QKuc56Ee5z clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.GqmPRoCqlA + rm /tmp/tmp.QKuc56Ee5z /tmp/tmp.GqmPRoCqlA + return 0 + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.tIqxkzpeA4 ++ mktemp + local LAST_ERR=/tmp/tmp.CBvUiHbOpw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694^' + yq 
eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tIqxkzpeA4 deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.CBvUiHbOpw + rm /tmp/tmp.tIqxkzpeA4 /tmp/tmp.CBvUiHbOpw + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.OzOn6bRpVI ++ mktemp + local LAST_ERR=/tmp/tmp.oMt3ZJcxqj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OzOn6bRpVI pod/percona-xtradb-cluster-operator-b5f9c4897-2vjh5 condition met + cat /tmp/tmp.oMt3ZJcxqj + rm /tmp/tmp.OzOn6bRpVI /tmp/tmp.oMt3ZJcxqj + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.BtFfFGIviu +++ mktemp ++ local LAST_ERR=/tmp/tmp.8NqQcxzw2W ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BtFfFGIviu ++ cat /tmp/tmp.8NqQcxzw2W ++ rm /tmp/tmp.BtFfFGIviu /tmp/tmp.8NqQcxzw2W ++ return 0 + wait_pod percona-xtradb-cluster-operator-b5f9c4897-2vjh5 480 pxc-operator + local pod=percona-xtradb-cluster-operator-b5f9c4897-2vjh5 + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-b5f9c4897-2vjh5 ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-b5f9c4897-2vjh5 condition met waiting for pod/percona-xtradb-cluster-operator-b5f9c4897-2vjh5 to become Ready.Ok + sleep 3 + create_namespace cross-site-2676 + local namespace=cross-site-2676 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was 
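# Note (editorial sketch): deploy_operator installs the CRDs with a server-side apply
# (--force-conflicts takes over fields an older operator may still own), rewrites the cluster-wide
# RBAC manifest to the pxc-operator namespace, and pins the operator Deployment to the PR image;
# the full pipeline above additionally sets DISABLE_TELEMETRY=true and LOG_LEVEL=VERBOSE via yq and
# raises the probe failureThreshold. Condensed version (paths relative to the Jenkins workspace):
  kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
  sed -e 's^namespace: .*^namespace: pxc-operator^' deploy/cw-rbac.yaml | kubectl apply -f -
  sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694^' deploy/cw-operator.yaml \
    | kubectl apply -f -
  kubectl wait --for=condition=Ready pods -n pxc-operator \
    -l app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s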
specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-2676' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-2676 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-2676 ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.5TVMMJe8jn ++ mktemp + kubectl_bin get ns ++ mktemp + local LAST_ERR=/tmp/tmp.quSx9L8u0P + local exit_status=0 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.cBA1Mg6hwx ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-2676 + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_ERR=/tmp/tmp.D3boaHOWvo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-2676 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cBA1Mg6hwx + cat /tmp/tmp.D3boaHOWvo + rm /tmp/tmp.cBA1Mg6hwx /tmp/tmp.D3boaHOWvo + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.5TVMMJe8jn + cat /tmp/tmp.quSx9L8u0P Error from server (NotFound): namespaces "cross-site-2676" not found + rm /tmp/tmp.5TVMMJe8jn /tmp/tmp.quSx9L8u0P + return 1 + : + wait_for_delete namespace/cross-site-2676 + local res=namespace/cross-site-2676 + echo -n 'waiting for namespace/cross-site-2676 to be deleted' waiting for namespace/cross-site-2676 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "cross-site-2676" not found + desc 'create namespace cross-site-2676' + set +o xtrace ----------------------------------------------------------------------------------- create namespace cross-site-2676 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-2676 ++ mktemp + local LAST_OUT=/tmp/tmp.c1TcgA25fk ++ mktemp + local LAST_ERR=/tmp/tmp.N5atDEuIot + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cross-site-2676 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c1TcgA25fk namespace/cross-site-2676 created + cat /tmp/tmp.N5atDEuIot + rm /tmp/tmp.c1TcgA25fk /tmp/tmp.N5atDEuIot + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nxfoEynbnY +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.K4lnCvZYFw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nxfoEynbnY ++ cat /tmp/tmp.K4lnCvZYFw ++ rm /tmp/tmp.nxfoEynbnY /tmp/tmp.K4lnCvZYFw ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=cross-site-2676 ++ mktemp + local LAST_OUT=/tmp/tmp.uJON2CQuor ++ mktemp + local LAST_ERR=/tmp/tmp.fv2furN91e + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=cross-site-2676 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uJON2CQuor Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2" modified. + cat /tmp/tmp.fv2furN91e + rm /tmp/tmp.uJON2CQuor /tmp/tmp.fv2furN91e + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.RIHBCYPQfA ++ mktemp + local LAST_ERR=/tmp/tmp.XKduPPrybL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RIHBCYPQfA secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.XKduPPrybL + rm /tmp/tmp.RIHBCYPQfA /tmp/tmp.XKduPPrybL + return 0 + start_minio + deploy_helm cross-site-2676 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Wed Nov 12 10:30:24 2025 NAMESPACE: cross-site-2676 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.cross-site-2676.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace cross-site-2676 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace cross-site-2676 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace cross-site-2676 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace cross-site-2676 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kFEcPetL9k +++ mktemp ++ local LAST_ERR=/tmp/tmp.hLIvtUb9Fg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kFEcPetL9k ++ cat /tmp/tmp.hLIvtUb9Fg ++ rm /tmp/tmp.kFEcPetL9k /tmp/tmp.hLIvtUb9Fg ++ return 0 + MINIO_POD=minio-service-55fcc5d75f-jtbwz + wait_pod minio-service-55fcc5d75f-jtbwz + local pod=minio-service-55fcc5d75f-jtbwz + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo minio-service-55fcc5d75f-jtbwz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/minio-service-55fcc5d75f-jtbwz condition met waiting for pod/minio-service-55fcc5d75f-jtbwz to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.eXrijO9MyG ++ mktemp + local LAST_ERR=/tmp/tmp.lEbWexUz9I + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eXrijO9MyG make_bucket: operator-testing pod "aws-cli" deleted from cross-site-2676 namespace + cat /tmp/tmp.lEbWexUz9I + rm /tmp/tmp.eXrijO9MyG /tmp/tmp.lEbWexUz9I + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.RE44rcZu5V ++ mktemp + local LAST_ERR=/tmp/tmp.rcRq2W7KQd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RE44rcZu5V secret/minio-secret unchanged secret/aws-s3-secret unchanged secret/gcp-cs-secret unchanged secret/azure-secret unchanged + cat /tmp/tmp.rcRq2W7KQd + rm /tmp/tmp.RE44rcZu5V /tmp/tmp.rcRq2W7KQd + return 0 + spinup_pxc cross-site-source /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-source.yml + local cluster=cross-site-source + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-source.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml + local 
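# Note (editorial sketch): start_minio deploys a single standalone MinIO instance from the official
# chart and then creates the operator-testing bucket with a throwaway aws-cli pod; the access/secret
# keys are the fixed test fixtures shown above. Condensed:
  helm install minio-service minio/minio --version 5.4.0 \
    --set replicas=1 --set mode=standalone --set service.type=ClusterIP \
    --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key'
  kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing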
pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.1ZKtMpv06R ++ mktemp + local LAST_ERR=/tmp/tmp.e4xqViofyi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1ZKtMpv06R secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.e4xqViofyi + rm /tmp/tmp.1ZKtMpv06R /tmp/tmp.e4xqViofyi + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-2676~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_OUT=/tmp/tmp.wd8RkUt0wY ++ mktemp + local LAST_ERR=/tmp/tmp.KK8EcoFie6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wd8RkUt0wY deployment.apps/pxc-client created + cat /tmp/tmp.KK8EcoFie6 + rm /tmp/tmp.wd8RkUt0wY /tmp/tmp.KK8EcoFie6 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-source.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-source.yml ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-2676~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: 
perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.VudyKffefK + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-source.yml ++ mktemp + local LAST_ERR=/tmp/tmp.w2Wi1eea50 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VudyKffefK perconaxtradbcluster.pxc.percona.com/cross-site-source created + cat /tmp/tmp.w2Wi1eea50 + rm /tmp/tmp.VudyKffefK /tmp/tmp.w2Wi1eea50 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy cross-site-source ++ local target_cluster=cross-site-source +++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SB3saDdSmE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EBlZ4ejHYf +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.SB3saDdSmE +++ cat /tmp/tmp.EBlZ4ejHYf +++ rm /tmp/tmp.SB3saDdSmE /tmp/tmp.EBlZ4ejHYf +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo cross-site-source-haproxy ++ return + local proxy=cross-site-source-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 ++ mktemp + local LAST_OUT=/tmp/tmp.glWUhmXCzv ++ mktemp + local LAST_ERR=/tmp/tmp.kwHfLYEmux + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.glWUhmXCzv + cat /tmp/tmp.kwHfLYEmux error: no matching resources found + rm /tmp/tmp.glWUhmXCzv /tmp/tmp.kwHfLYEmux + return 1 + true + wait_for_running cross-site-source-haproxy 1 + local name=cross-site-source-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace 
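# Note (editorial sketch): spinup_pxc does not apply the repo manifests verbatim; cat_config pipes
# them through a chain of sed substitutions so every image reference resolves to the images under
# test (operator PR image, main-pxc8.0, main-haproxy, main-proxysql, backup, logcollector, pmm),
# e.g. for the source cluster CR:
  sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
      -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' \
      -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
      e2e-tests/cross-site/conf/cross-site-source.yml | kubectl apply -f -
# The three failed "kubectl wait ... app.kubernetes.io/instance=monitoring" attempts right above end
# in "no matching resources found" and are tolerated (note the '+ true' after 'return 1'); no
# monitoring pods are expected in this scenario.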
----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-source-haproxy-0 480 + local pod=cross-site-source-haproxy-0 + local max_retry=480 + local ns= ++ echo cross-site-source-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/cross-site-source-haproxy-0 condition met waiting for pod/cross-site-source-haproxy-0 to become Ready.Ok + wait_for_running cross-site-source-pxc 3 + local name=cross-site-source-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-source-pxc-0 480 + local pod=cross-site-source-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo cross-site-source-pxc-0 + local container=pxc + set +o xtrace pod/cross-site-source-pxc-0 condition met waiting for pod/cross-site-source-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-source-pxc-1 480 + local pod=cross-site-source-pxc-1 + local max_retry=480 + local ns= ++ echo cross-site-source-pxc-1 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/cross-site-source-pxc-1 condition met waiting for pod/cross-site-source-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-source-pxc-2 480 + local pod=cross-site-source-pxc-2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo cross-site-source-pxc-2 ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/cross-site-source-pxc-2 condition met waiting for pod/cross-site-source-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.JnyihHTeIO +++ mktemp ++ local LAST_ERR=/tmp/tmp.cqoUnScSNm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JnyihHTeIO ++ cat /tmp/tmp.cqoUnScSNm ++ rm /tmp/tmp.JnyihHTeIO /tmp/tmp.cqoUnScSNm ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h cross-site-source-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h cross-site-source-haproxy -uroot 
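# Note (editorial sketch): the root password is read straight from the cluster's Secret and the
# seed data is written through HAProxy. run_mysql runs the statement from inside the pxc-client
# deployment; the exact mysql invocation below is an assumption (the helper's internals are not in
# this log), while the statements and credentials are the ones traced above:
  ROOT_PASS=$(kubectl get secrets/my-cluster-secrets --template='{{.data.root}}' | base64 --decode)
  kubectl exec deploy/pxc-client -c pxc-client -- mysql -h cross-site-source-haproxy -P3306 -uroot -p"$ROOT_PASS" \
    -e 'CREATE DATABASE IF NOT EXISTS myApp; CREATE TABLE IF NOT EXISTS myApp.myApp (id int PRIMARY KEY); INSERT myApp.myApp (id) VALUES (100500);'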
-p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nPAlLnEtnH +++ mktemp ++ local LAST_ERR=/tmp/tmp.odjC6yyJEs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nPAlLnEtnH ++ cat /tmp/tmp.odjC6yyJEs ++ rm /tmp/tmp.nPAlLnEtnH /tmp/tmp.odjC6yyJEs ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-tkzdl ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h cross-site-source-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h cross-site-source-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kaHgOUJerI +++ mktemp ++ local LAST_ERR=/tmp/tmp.MyfwZB1Sie ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kaHgOUJerI ++ cat /tmp/tmp.MyfwZB1Sie ++ rm /tmp/tmp.kaHgOUJerI /tmp/tmp.MyfwZB1Sie ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-59944c5bbf-tkzdl ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ 
mktemp ++ local LAST_OUT=/tmp/tmp.85UKqNivb1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RiOJXilyQB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.85UKqNivb1 ++ cat /tmp/tmp.RiOJXilyQB ++ rm /tmp/tmp.85UKqNivb1 /tmp/tmp.RiOJXilyQB ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-59944c5bbf-tkzdl ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zXGDdhqf6f +++ mktemp ++ local LAST_ERR=/tmp/tmp.NrVMpsgdXD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zXGDdhqf6f ++ cat /tmp/tmp.NrVMpsgdXD ++ rm /tmp/tmp.zXGDdhqf6f /tmp/tmp.NrVMpsgdXD ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-59944c5bbf-tkzdl + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ipPhr5TEJQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tpHVgxlvr0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ipPhr5TEJQ ++ cat /tmp/tmp.tpHVgxlvr0 ++ rm /tmp/tmp.ipPhr5TEJQ /tmp/tmp.tpHVgxlvr0 ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-tkzdl ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
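# Note (editorial sketch): the same SELECT is then checked on each member individually through its
# per-pod DNS name and diffed against a stored expectation, so the seed row is verified on all
# three nodes rather than only through the proxy. For one node (mysql invocation again assumed):
  kubectl exec deploy/pxc-client -c pxc-client -- mysql -NB -h cross-site-source-pxc-0.cross-site-source-pxc \
    -P3306 -uroot -p"$ROOT_PASS" -e 'SELECT * from myApp.myApp;' > /tmp/select-1.sql
  diff -u e2e-tests/cross-site/compare/select-1.sql /tmp/select-1.sql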
-s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql ++ is_keyring_plugin_in_use cross-site-source ++ local cluster=cross-site-source ++ kubectl_bin exec -it cross-site-source-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3E63utHFjM +++ mktemp ++ local LAST_ERR=/tmp/tmp.OE5zx1Cu5D ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it cross-site-source-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3E63utHFjM ++ cat /tmp/tmp.OE5zx1Cu5D Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.3E63utHFjM /tmp/tmp.OE5zx1Cu5D ++ return 0 + '[' '' ']' + sleep 60 + desc 'get main cluster services endpoints' + set +o xtrace ----------------------------------------------------------------------------------- get main cluster services endpoints ----------------------------------------------------------------------------------- ++ get_service_ip cross-site-source-pxc-0 ++ local service=cross-site-source-pxc-0 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ioPGNnClRt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Rtthu7scP6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ioPGNnClRt +++ cat /tmp/tmp.Rtthu7scP6 +++ rm /tmp/tmp.ioPGNnClRt /tmp/tmp.Rtthu7scP6 +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.96DEzVdzhJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ra9fsIYKh3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.96DEzVdzhJ ++ cat /tmp/tmp.Ra9fsIYKh3 ++ rm /tmp/tmp.96DEzVdzhJ /tmp/tmp.Ra9fsIYKh3 ++ return 0 ++ return + source_endpoint0=34.118.229.201 ++ get_service_ip cross-site-source-pxc-1 ++ local service=cross-site-source-pxc-1 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zI0LzmAanf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0NLjijocRD +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.zI0LzmAanf +++ cat /tmp/tmp.0NLjijocRD +++ rm /tmp/tmp.zI0LzmAanf /tmp/tmp.0NLjijocRD +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MksP2VZgra +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ec1gCMBAfg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat 
/tmp/tmp.MksP2VZgra ++ cat /tmp/tmp.Ec1gCMBAfg ++ rm /tmp/tmp.MksP2VZgra /tmp/tmp.Ec1gCMBAfg ++ return 0 ++ return + source_endpoint1=34.118.233.218 ++ get_service_ip cross-site-source-pxc-2 ++ local service=cross-site-source-pxc-2 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.td3Xaqe0Fn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7caWLNoyPy +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.td3Xaqe0Fn +++ cat /tmp/tmp.7caWLNoyPy +++ rm /tmp/tmp.td3Xaqe0Fn /tmp/tmp.7caWLNoyPy +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ksxd9ixkaL +++ mktemp ++ local LAST_ERR=/tmp/tmp.J6S4eN1oG5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ksxd9ixkaL ++ cat /tmp/tmp.J6S4eN1oG5 ++ rm /tmp/tmp.Ksxd9ixkaL /tmp/tmp.J6S4eN1oG5 ++ return 0 ++ return + source_endpoint2=34.118.226.72 ++ run_mysql 'SELECT @@hostname hostname;' '-h cross-site-source-haproxy -p33062 -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h cross-site-source-haproxy -p33062 -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PbYM50YXOP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3oZttPMO7u +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PbYM50YXOP +++ cat /tmp/tmp.3oZttPMO7u +++ rm /tmp/tmp.PbYM50YXOP /tmp/tmp.3oZttPMO7u +++ return 0 ++ client_pod=pxc-client-59944c5bbf-tkzdl ++ wait_pod pxc-client-59944c5bbf-tkzdl ++ local pod=pxc-client-59944c5bbf-tkzdl ++ local max_retry=480 ++ local ns= +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' +++ echo pxc-client-59944c5bbf-tkzdl ++ local container= ++ set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + source_primary=cross-site-source-pxc-0 ++ get_service_ip cross-site-source-pxc-0 ++ local service=cross-site-source-pxc-0 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BCsEssyqUh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9tioIE8l34 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BCsEssyqUh +++ cat /tmp/tmp.9tioIE8l34 +++ rm /tmp/tmp.BCsEssyqUh /tmp/tmp.9tioIE8l34 +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local 
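# Note (editorial sketch): every PXC member has its own Service, and the test records those
# ClusterIPs (34.118.229.201, 34.118.233.218, 34.118.226.72) so the replica site can later point
# its replication channel at them; the current primary (cross-site-source-pxc-0) is resolved with
# SELECT @@hostname through HAProxy on port 33062, as traced above. Per-pod IP lookup:
  kubectl get service/cross-site-source-pxc-0 -o jsonpath='{.spec.clusterIP}'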
LAST_OUT=/tmp/tmp.UTODDnqdnQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.bQK5myGR54 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UTODDnqdnQ ++ cat /tmp/tmp.bQK5myGR54 ++ rm /tmp/tmp.UTODDnqdnQ /tmp/tmp.bQK5myGR54 ++ return 0 ++ return + source_primary_endpoint=34.118.229.201 + desc 'patch source cluster with replicationChannels settings' + set +o xtrace ----------------------------------------------------------------------------------- patch source cluster with replicationChannels settings ----------------------------------------------------------------------------------- + kubectl_bin patch pxc cross-site-source --type=merge --patch '{"spec": {"pxc":{"replicationChannels": [{"name":"source_to_replica", "isSource": true}]}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.g4xAwHfzHX ++ mktemp + local LAST_ERR=/tmp/tmp.KiQD6oA0mb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc cross-site-source --type=merge --patch '{"spec": {"pxc":{"replicationChannels": [{"name":"source_to_replica", "isSource": true}]}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.g4xAwHfzHX perconaxtradbcluster.pxc.percona.com/cross-site-source patched + cat /tmp/tmp.KiQD6oA0mb + rm /tmp/tmp.g4xAwHfzHX /tmp/tmp.KiQD6oA0mb + return 0 + desc 'patch main cluster secrets with replication user' + set +o xtrace ----------------------------------------------------------------------------------- patch main cluster secrets with replication user ----------------------------------------------------------------------------------- ++ base64 ++ echo -n new_password + kubectl_bin patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.nBNsaU2cZH ++ mktemp + local LAST_ERR=/tmp/tmp.Bt2oPF5DaO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nBNsaU2cZH secret/my-cluster-secrets patched + cat /tmp/tmp.Bt2oPF5DaO + rm /tmp/tmp.nBNsaU2cZH /tmp/tmp.Bt2oPF5DaO + return 0 + sleep 15 + wait_cluster_consistency cross-site-source 3 2 + local cluster_name=cross-site-source + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/cross-site-source to be ready' waiting for pxc/cross-site-source to be ready++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VS9ZAbWQCd +++ mktemp ++ local LAST_ERR=/tmp/tmp.iwW8HKdsX3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VS9ZAbWQCd ++ cat /tmp/tmp.iwW8HKdsX3 ++ rm /tmp/tmp.VS9ZAbWQCd /tmp/tmp.iwW8HKdsX3 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2EyZpXIIRE +++ mktemp ++ local LAST_ERR=/tmp/tmp.S6zdDqbGWK ++ local exit_status=0 +++ 
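# Note (editorial sketch): enabling the source side of cross-site replication is a merge patch on
# the custom resource plus a known password for the "replication" user in the cluster Secret
# (Secret values are base64-encoded). Exactly as run above:
  kubectl patch pxc cross-site-source --type=merge \
    --patch '{"spec": {"pxc":{"replicationChannels": [{"name":"source_to_replica", "isSource": true}]}}}'
  kubectl patch secret my-cluster-secrets \
    -p "{\"data\":{\"replication\": \"$(echo -n new_password | base64)\"}}"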
seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2EyZpXIIRE ++ cat /tmp/tmp.S6zdDqbGWK ++ rm /tmp/tmp.2EyZpXIIRE /tmp/tmp.S6zdDqbGWK ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-source +++ local cluster_name=cross-site-source ++++ get_proxy cross-site-source ++++ local target_cluster=cross-site-source +++++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.fLhcd5i378 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.jF3mboPpgq +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.fLhcd5i378 +++++ cat /tmp/tmp.jF3mboPpgq +++++ rm /tmp/tmp.fLhcd5i378 /tmp/tmp.jF3mboPpgq +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-source-haproxy ++++ return +++ local cluster_proxy=cross-site-source-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IniQnnuAJg +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ZF0LKWCo8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IniQnnuAJg ++ cat /tmp/tmp.0ZF0LKWCo8 ++ rm /tmp/tmp.IniQnnuAJg /tmp/tmp.0ZF0LKWCo8 ++ return 0 + [[ 2 == \2 ]] + echo + desc 'write data to source cluster' + set +o xtrace ----------------------------------------------------------------------------------- write data to source cluster ----------------------------------------------------------------------------------- + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testSourceReplica (id int PRIMARY KEY);' '-h 34.118.229.201 -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testSourceReplica (id int PRIMARY KEY);' + local 'uri=-h 34.118.229.201 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.prjmZAgndW +++ mktemp ++ local LAST_ERR=/tmp/tmp.vFF1q2SIv9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.prjmZAgndW ++ cat /tmp/tmp.vFF1q2SIv9 ++ rm /tmp/tmp.prjmZAgndW /tmp/tmp.vFF1q2SIv9 ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-tkzdl ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.testSourceReplica (id) VALUES (100700)' '-h 34.118.229.201 -uroot -proot_password' + local 'command=INSERT myApp.testSourceReplica (id) VALUES (100700)' + local 'uri=-h 34.118.229.201 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin 
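# The statements above seed the data that the replica is later expected to receive over
# the source_to_replica channel: table myApp.testSourceReplica with a single row (100700),
# written through the source primary endpoint 34.118.229.201. run_mysql executes queries
# from the pxc-client pod; a rough equivalent, assuming the mysql client is on that pod's
# PATH:
kubectl exec pxc-client-59944c5bbf-tkzdl -c pxc-client -- \
  mysql -h 34.118.229.201 -uroot -proot_password \
  -e "CREATE DATABASE IF NOT EXISTS myApp; CREATE TABLE IF NOT EXISTS myApp.testSourceReplica (id int PRIMARY KEY); INSERT INTO myApp.testSourceReplica (id) VALUES (100700);"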
get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DkvX1voxfH +++ mktemp ++ local LAST_ERR=/tmp/tmp.izW5tAFbwu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DkvX1voxfH ++ cat /tmp/tmp.izW5tAFbwu ++ rm /tmp/tmp.DkvX1voxfH /tmp/tmp.izW5tAFbwu ++ return 0 + client_pod=pxc-client-59944c5bbf-tkzdl + wait_pod pxc-client-59944c5bbf-tkzdl + local pod=pxc-client-59944c5bbf-tkzdl + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-tkzdl ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-tkzdl condition met waiting for pod/pxc-client-59944c5bbf-tkzdl to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + kubectl_bin get secrets cross-site-source-ssl-internal -o yaml + yq '.metadata={"name": "cross-site-replica-ssl-internal"}' - + yq 'del(.metadata)' - ++ mktemp + local LAST_OUT=/tmp/tmp.ele1XgDdJc ++ mktemp + local LAST_ERR=/tmp/tmp.748vjC5UeY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get secrets cross-site-source-ssl-internal -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ele1XgDdJc + cat /tmp/tmp.748vjC5UeY + rm /tmp/tmp.ele1XgDdJc /tmp/tmp.748vjC5UeY + return 0 + desc 'take backup of source cluster' + set +o xtrace ----------------------------------------------------------------------------------- take backup of source cluster ----------------------------------------------------------------------------------- + run_backup cross-site-source backup-minio-source + local cluster=cross-site-source + local backup=backup-minio-source + log 'run pxc-backup/backup-minio-source' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:39:20+0000]' run pxc-backup/backup-minio-source [2025-11-12T10:39:20+0000] run pxc-backup/backup-minio-source + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/backup-minio-source.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Ktb5Anu9tR ++ mktemp + local LAST_ERR=/tmp/tmp.Ku4X5aS9cE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/backup-minio-source.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ktb5Anu9tR perconaxtradbclusterbackup.pxc.percona.com/backup-minio-source created + cat /tmp/tmp.Ku4X5aS9cE + rm /tmp/tmp.Ktb5Anu9tR /tmp/tmp.Ku4X5aS9cE + return 0 + wait_backup backup-minio-source + local backup=backup-minio-source + local status=Succeeded + set +o xtrace waiting for pxc-backup/backup-minio-source to reach Succeeded state...............Succeeded + desc 'create replica cluster' + set +o xtrace ----------------------------------------------------------------------------------- create replica cluster ----------------------------------------------------------------------------------- + create_namespace cross-site-replica-23217 0 + local namespace=cross-site-replica-23217 + local skip_clean_namespace=0 + [[ 1 == 1 ]] + [[ -z 0 ]] + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-replica-23217' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-replica-23217 
----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-replica-23217 ++ mktemp + local LAST_OUT=/tmp/tmp.18Ix7qmIQ8 ++ mktemp + local LAST_ERR=/tmp/tmp.B49cGfTlYH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-23217 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-23217 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-23217 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.18Ix7qmIQ8 + cat /tmp/tmp.B49cGfTlYH Error from server (NotFound): namespaces "cross-site-replica-23217" not found + rm /tmp/tmp.18Ix7qmIQ8 /tmp/tmp.B49cGfTlYH + return 1 + : + wait_for_delete namespace/cross-site-replica-23217 + local res=namespace/cross-site-replica-23217 + echo -n 'waiting for namespace/cross-site-replica-23217 to be deleted' waiting for namespace/cross-site-replica-23217 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "cross-site-replica-23217" not found + desc 'create namespace cross-site-replica-23217' + set +o xtrace ----------------------------------------------------------------------------------- create namespace cross-site-replica-23217 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-replica-23217 ++ mktemp + local LAST_OUT=/tmp/tmp.bxLSk4mARt ++ mktemp + local LAST_ERR=/tmp/tmp.9PLce89cXB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cross-site-replica-23217 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bxLSk4mARt namespace/cross-site-replica-23217 created + cat /tmp/tmp.9PLce89cXB + rm /tmp/tmp.bxLSk4mARt /tmp/tmp.9PLce89cXB + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4OMSopuueu +++ mktemp ++ local LAST_ERR=/tmp/tmp.qAkoWnHwOQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4OMSopuueu ++ cat /tmp/tmp.qAkoWnHwOQ ++ rm /tmp/tmp.4OMSopuueu /tmp/tmp.qAkoWnHwOQ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=cross-site-replica-23217 ++ mktemp + local LAST_OUT=/tmp/tmp.9y8Ijf5YNk ++ mktemp + local LAST_ERR=/tmp/tmp.KUERDH5tBa + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2 --namespace=cross-site-replica-23217 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9y8Ijf5YNk Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster2" modified. 
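# Before the replica cluster is deployed, the source cluster's ssl-internal secret was
# exported above and is re-applied below as cross-site-replica-ssl-internal, presumably so
# the replica can validate TLS on the replication channel against the same internal CA. A
# minimal sketch of that copy, assuming kubectl points at the source namespace for the get
# and at the replica namespace for the apply:
kubectl get secret cross-site-source-ssl-internal -o yaml \
  | yq 'del(.metadata)' - \
  | yq '.metadata={"name": "cross-site-replica-ssl-internal"}' - \
  > replica-ssl-internal.yaml
kubectl apply -f replica-ssl-internal.yaml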
+ cat /tmp/tmp.KUERDH5tBa + rm /tmp/tmp.9y8Ijf5YNk /tmp/tmp.KUERDH5tBa + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.BeTsBfAqTN ++ mktemp + local LAST_ERR=/tmp/tmp.efjJJRENxA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BeTsBfAqTN customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.efjJJRENxA + rm /tmp/tmp.BeTsBfAqTN /tmp/tmp.efjJJRENxA + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Ex4tjk3YYJ ++ mktemp + local LAST_ERR=/tmp/tmp.teYlfN9TrK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ex4tjk3YYJ clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.teYlfN9TrK + rm /tmp/tmp.Ex4tjk3YYJ /tmp/tmp.teYlfN9TrK + return 0 + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694^' + kubectl_bin apply -f - ++ mktemp + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + local LAST_OUT=/tmp/tmp.GYlPZUvPbu ++ mktemp + local LAST_ERR=/tmp/tmp.Ar4H6sZrUg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GYlPZUvPbu deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.Ar4H6sZrUg + rm /tmp/tmp.GYlPZUvPbu /tmp/tmp.Ar4H6sZrUg + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.4cGqfc6DKB ++ mktemp + local LAST_ERR=/tmp/tmp.pH5RSBGCVO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l 
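# The operator Deployment applied above is rewritten on the fly before kubectl apply: the
# image is pinned to the PR build, failureThreshold is raised to 10, and the
# DISABLE_TELEMETRY and LOG_LEVEL container env values are overridden with yq. A condensed
# sketch of that rewrite, assuming the substitutions can be chained in this order:
cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-operator.yaml \
  | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694^' \
  | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
  | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
  | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \
  | kubectl apply -f -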
app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4cGqfc6DKB pod/percona-xtradb-cluster-operator-b5f9c4897-868cm condition met + cat /tmp/tmp.pH5RSBGCVO + rm /tmp/tmp.4cGqfc6DKB /tmp/tmp.pH5RSBGCVO + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.9acJE99mU7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5LP2wSB7Yk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9acJE99mU7 ++ cat /tmp/tmp.5LP2wSB7Yk ++ rm /tmp/tmp.9acJE99mU7 /tmp/tmp.5LP2wSB7Yk ++ return 0 + wait_pod percona-xtradb-cluster-operator-b5f9c4897-2vjh5 480 pxc-operator + local pod=percona-xtradb-cluster-operator-b5f9c4897-2vjh5 + local max_retry=480 + local ns=pxc-operator ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo percona-xtradb-cluster-operator-b5f9c4897-2vjh5 + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-b5f9c4897-2vjh5 condition met waiting for pod/percona-xtradb-cluster-operator-b5f9c4897-2vjh5 to become Ready.Ok + sleep 3 + kubectl_bin apply -f /tmp/tmp.j31CMwHlyC/replica-ssl-internal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gG7IPUebwv ++ mktemp + local LAST_ERR=/tmp/tmp.9FtVWKFzdp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.j31CMwHlyC/replica-ssl-internal.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gG7IPUebwv secret/cross-site-replica-ssl-internal created + cat /tmp/tmp.9FtVWKFzdp + rm /tmp/tmp.gG7IPUebwv /tmp/tmp.9FtVWKFzdp + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.a1w4J5asd2 ++ mktemp + local LAST_ERR=/tmp/tmp.XYSZyIoqyO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.a1w4J5asd2 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.XYSZyIoqyO + rm /tmp/tmp.a1w4J5asd2 /tmp/tmp.XYSZyIoqyO + return 0 + spinup_pxc cross-site-replica /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-replica.yml + local cluster=cross-site-replica + local 
config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-replica.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BVirYUh9v3 ++ mktemp + local LAST_ERR=/tmp/tmp.FkrkZSInaw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BVirYUh9v3 secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.FkrkZSInaw + rm /tmp/tmp.BVirYUh9v3 /tmp/tmp.FkrkZSInaw + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-2676~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.bSHSBXeLk1 ++ mktemp + local LAST_ERR=/tmp/tmp.WC3CGgDEV5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bSHSBXeLk1 deployment.apps/pxc-client created + cat /tmp/tmp.WC3CGgDEV5 + rm /tmp/tmp.bSHSBXeLk1 /tmp/tmp.WC3CGgDEV5 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-replica.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-replica.yml + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 
's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + local LAST_OUT=/tmp/tmp.hYEUlQIy51 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/cross-site-replica.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + local LAST_ERR=/tmp/tmp.DwWz1pfAky + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-2676~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hYEUlQIy51 perconaxtradbcluster.pxc.percona.com/cross-site-replica created + cat /tmp/tmp.DwWz1pfAky + rm /tmp/tmp.hYEUlQIy51 /tmp/tmp.DwWz1pfAky + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy cross-site-replica ++ local target_cluster=cross-site-replica +++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o5YRbEw54C ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wi3l6Cw6k1 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.o5YRbEw54C +++ cat /tmp/tmp.wi3l6Cw6k1 +++ rm /tmp/tmp.o5YRbEw54C /tmp/tmp.wi3l6Cw6k1 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo cross-site-replica-haproxy ++ return + local proxy=cross-site-replica-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 ++ mktemp + local LAST_OUT=/tmp/tmp.eoq6Wgvc5P ++ mktemp + local LAST_ERR=/tmp/tmp.YReUoxbnq8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-2676 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.eoq6Wgvc5P + cat /tmp/tmp.YReUoxbnq8 error: no matching resources found + rm 
/tmp/tmp.eoq6Wgvc5P /tmp/tmp.YReUoxbnq8 + return 1 + true + wait_for_running cross-site-replica-haproxy 1 + local name=cross-site-replica-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-haproxy-0 480 + local pod=cross-site-replica-haproxy-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo cross-site-replica-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/cross-site-replica-haproxy-0 condition met waiting for pod/cross-site-replica-haproxy-0 to become Ready.Ok + wait_for_running cross-site-replica-pxc 3 + local name=cross-site-replica-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-0 480 + local pod=cross-site-replica-pxc-0 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-0 condition met waiting for pod/cross-site-replica-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-1 480 + local pod=cross-site-replica-pxc-1 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-1 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-1 condition met waiting for pod/cross-site-replica-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-2 480 + local pod=cross-site-replica-pxc-2 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-2 condition met waiting for pod/cross-site-replica-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ base64 --decode ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wxLDeF5H4D +++ mktemp ++ local LAST_ERR=/tmp/tmp.M0or0y4sSw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wxLDeF5H4D ++ cat /tmp/tmp.M0or0y4sSw ++ rm /tmp/tmp.wxLDeF5H4D /tmp/tmp.M0or0y4sSw ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp 
(id int PRIMARY KEY) ;' '-h cross-site-replica-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h cross-site-replica-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CxPcvJsy40 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fo3kiVChMe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CxPcvJsy40 ++ cat /tmp/tmp.Fo3kiVChMe ++ rm /tmp/tmp.CxPcvJsy40 /tmp/tmp.Fo3kiVChMe ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-rpn2d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h cross-site-replica-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h cross-site-replica-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hcgQL4vyij +++ mktemp ++ local LAST_ERR=/tmp/tmp.qOV17r83Al ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hcgQL4vyij ++ cat /tmp/tmp.qOV17r83Al ++ rm /tmp/tmp.hcgQL4vyij /tmp/tmp.qOV17r83Al ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-59944c5bbf-rpn2d + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot 
-p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fE9BcLgz9Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.aZczrYhUqa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fE9BcLgz9Q ++ cat /tmp/tmp.aZczrYhUqa ++ rm /tmp/tmp.fE9BcLgz9Q /tmp/tmp.aZczrYhUqa ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-rpn2d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X4SnG9VHWp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rl46f3FDLb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.X4SnG9VHWp ++ cat /tmp/tmp.Rl46f3FDLb ++ rm /tmp/tmp.X4SnG9VHWp /tmp/tmp.Rl46f3FDLb ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-rpn2d ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4Ibc8j0Odr +++ mktemp ++ local LAST_ERR=/tmp/tmp.0XqE6XjSV4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4Ibc8j0Odr ++ cat /tmp/tmp.0XqE6XjSV4 ++ rm /tmp/tmp.4Ibc8j0Odr /tmp/tmp.0XqE6XjSV4 ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-rpn2d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j31CMwHlyC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.j31CMwHlyC/select-1.sql ++ is_keyring_plugin_in_use cross-site-replica ++ local cluster=cross-site-replica ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ kubectl_bin exec -it cross-site-replica-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sltLY7u7tx +++ mktemp ++ local LAST_ERR=/tmp/tmp.iNvORff6sZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it cross-site-replica-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sltLY7u7tx ++ cat /tmp/tmp.iNvORff6sZ Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.sltLY7u7tx /tmp/tmp.iNvORff6sZ ++ return 0 + '[' '' ']' + sleep 60 + desc 'restore backup from source cluster' + set +o xtrace ----------------------------------------------------------------------------------- restore backup from source cluster ----------------------------------------------------------------------------------- ++ kubectl_bin get -n cross-site-2676 pxc-backup backup-minio-source -o 'jsonpath={.status.destination}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qXgFEhvlUA +++ mktemp ++ local LAST_ERR=/tmp/tmp.PHjLQUfiZI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get -n cross-site-2676 pxc-backup backup-minio-source -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qXgFEhvlUA ++ cat /tmp/tmp.PHjLQUfiZI ++ rm /tmp/tmp.qXgFEhvlUA /tmp/tmp.PHjLQUfiZI ++ return 0 + destination= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/restore-backup-minio.yml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-2676~ + kubectl_bin apply -f - + /usr/bin/sed -e s~#destination~~ + /usr/bin/sed -e s~#cluster~cross-site-replica~ ++ mktemp + local LAST_OUT=/tmp/tmp.14bUOZIe0w ++ mktemp + local LAST_ERR=/tmp/tmp.5PhS1ieyZ0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.14bUOZIe0w perconaxtradbclusterrestore.pxc.percona.com/backup-minio created + cat /tmp/tmp.5PhS1ieyZ0 + rm /tmp/tmp.14bUOZIe0w /tmp/tmp.5PhS1ieyZ0 + return 0 + wait_cluster_consistency cross-site-replica 3 2 + local cluster_name=cross-site-replica + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/cross-site-replica to be ready' waiting for pxc/cross-site-replica to be ready++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3r9vSSHifD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bkdv6L81Kw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3r9vSSHifD ++ cat /tmp/tmp.Bkdv6L81Kw ++ rm /tmp/tmp.3r9vSSHifD /tmp/tmp.Bkdv6L81Kw ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' 
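# The restore applied above is rendered from a template: #cluster, #destination and
# minio-service.#namespace are substituted with sed before kubectl apply. Note that
# .status.destination of backup-minio-source came back empty at this point in the log, so
# #destination was replaced with an empty string. A sketch of the rendering step, assuming
# the helper stores the jsonpath output in a destination variable:
destination=$(kubectl get -n cross-site-2676 pxc-backup backup-minio-source -o 'jsonpath={.status.destination}')
cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/conf/restore-backup-minio.yml \
  | sed -e "s~#cluster~cross-site-replica~" \
  | sed -e "s~#destination~${destination}~" \
  | sed -e "s~minio-service.#namespace~minio-service.cross-site-2676~" \
  | kubectl apply -f -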
+++ mktemp ++ local LAST_OUT=/tmp/tmp.7IFzLdwIr1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4fxrMZ8TE6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7IFzLdwIr1 ++ cat /tmp/tmp.4fxrMZ8TE6 ++ rm /tmp/tmp.7IFzLdwIr1 /tmp/tmp.4fxrMZ8TE6 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-replica +++ local cluster_name=cross-site-replica ++++ get_proxy cross-site-replica ++++ local target_cluster=cross-site-replica +++++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.QdAGELWw6H ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.rXMO9rcNeK +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.QdAGELWw6H +++++ cat /tmp/tmp.rXMO9rcNeK +++++ rm /tmp/tmp.QdAGELWw6H /tmp/tmp.rXMO9rcNeK +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-replica-haproxy ++++ return +++ local cluster_proxy=cross-site-replica-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wMYiGn1UPn +++ mktemp ++ local LAST_ERR=/tmp/tmp.uyPhTzAYPw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wMYiGn1UPn ++ cat /tmp/tmp.uyPhTzAYPw ++ rm /tmp/tmp.wMYiGn1UPn /tmp/tmp.uyPhTzAYPw ++ return 0 + [[ 2 == \2 ]] + echo + desc 'get replica cluster services endpoints' + set +o xtrace ----------------------------------------------------------------------------------- get replica cluster services endpoints ----------------------------------------------------------------------------------- ++ get_service_ip cross-site-replica-pxc-0 ++ local service=cross-site-replica-pxc-0 ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Mne8R3Jf3A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nwNJxEBytx +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Mne8R3Jf3A +++ cat /tmp/tmp.nwNJxEBytx +++ rm /tmp/tmp.Mne8R3Jf3A /tmp/tmp.nwNJxEBytx +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1gQh8sLUAs +++ mktemp ++ local LAST_ERR=/tmp/tmp.oWLwGMFx2r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1gQh8sLUAs ++ cat /tmp/tmp.oWLwGMFx2r ++ rm /tmp/tmp.1gQh8sLUAs /tmp/tmp.oWLwGMFx2r ++ return 0 ++ return + replica_endpoint0=34.118.232.34 ++ get_service_ip cross-site-replica-pxc-1 ++ local service=cross-site-replica-pxc-1 ++ kubectl_bin get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get 
service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9EQGo80hWK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5HLRzxEFuE +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.9EQGo80hWK +++ cat /tmp/tmp.5HLRzxEFuE +++ rm /tmp/tmp.9EQGo80hWK /tmp/tmp.5HLRzxEFuE +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CVJFORTkXj +++ mktemp ++ local LAST_ERR=/tmp/tmp.tSqZyJH0Jr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CVJFORTkXj ++ cat /tmp/tmp.tSqZyJH0Jr ++ rm /tmp/tmp.CVJFORTkXj /tmp/tmp.tSqZyJH0Jr ++ return 0 ++ return + replica_endpoint1=34.118.233.76 ++ get_service_ip cross-site-replica-pxc-2 ++ local service=cross-site-replica-pxc-2 ++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b5hKX4Ne9I ++++ mktemp +++ local LAST_ERR=/tmp/tmp.x89zh1ZK0i +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.b5hKX4Ne9I +++ cat /tmp/tmp.x89zh1ZK0i +++ rm /tmp/tmp.b5hKX4Ne9I /tmp/tmp.x89zh1ZK0i +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.broLWDh6wI +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RTmSewJ09 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.broLWDh6wI ++ cat /tmp/tmp.6RTmSewJ09 ++ rm /tmp/tmp.broLWDh6wI /tmp/tmp.6RTmSewJ09 ++ return 0 ++ return + replica_endpoint2=34.118.226.3 ++ run_mysql 'SELECT @@hostname hostname;' '-h cross-site-replica-haproxy -p33062 -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h cross-site-replica-haproxy -p33062 -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ugsg7XDrsI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.79e0qQLpDJ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ugsg7XDrsI +++ cat /tmp/tmp.79e0qQLpDJ +++ rm /tmp/tmp.ugsg7XDrsI /tmp/tmp.79e0qQLpDJ +++ return 0 ++ client_pod=pxc-client-59944c5bbf-rpn2d ++ wait_pod pxc-client-59944c5bbf-rpn2d ++ local pod=pxc-client-59944c5bbf-rpn2d ++ local max_retry=480 ++ local ns= +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' +++ echo pxc-client-59944c5bbf-rpn2d ++ local container= ++ set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container 
"pxc-client" out of: pxc-client, backup .Ok ++ set +o xtrace + replica_primary=cross-site-replica-pxc-0 ++ get_service_ip cross-site-replica-pxc-0 ++ local service=cross-site-replica-pxc-0 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CrmKfuDO1V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TvN0fEfnI2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.CrmKfuDO1V +++ cat /tmp/tmp.TvN0fEfnI2 +++ rm /tmp/tmp.CrmKfuDO1V /tmp/tmp.TvN0fEfnI2 +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0buegX2tFm +++ mktemp ++ local LAST_ERR=/tmp/tmp.lr0saQINTR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0buegX2tFm ++ cat /tmp/tmp.lr0saQINTR ++ rm /tmp/tmp.0buegX2tFm /tmp/tmp.lr0saQINTR ++ return 0 ++ return + replica_primary_endpoint=34.118.232.34 + run_mysql 'DELETE FROM myApp.myApp WHERE id=100500' '-h 34.118.232.34 -uroot -proot_password' + local 'command=DELETE FROM myApp.myApp WHERE id=100500' + local 'uri=-h 34.118.232.34 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GEiMxF0l32 +++ mktemp ++ local LAST_ERR=/tmp/tmp.waggeq5Sf1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GEiMxF0l32 ++ cat /tmp/tmp.waggeq5Sf1 ++ rm /tmp/tmp.GEiMxF0l32 /tmp/tmp.waggeq5Sf1 ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-59944c5bbf-rpn2d ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + desc 'patch replica cluster with replicationChannels settings' + set +o xtrace ----------------------------------------------------------------------------------- patch replica cluster with replicationChannels settings ----------------------------------------------------------------------------------- + kubectl_bin patch pxc cross-site-replica --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "source_to_replica", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "34.118.229.201", "port": 3306, "weight": 100},{"host": "34.118.233.218", "port": 3306, "weight": 100},{"host": "34.118.226.72", "port": 3306, "weight": 100}]}]}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.Yq0Tg69PRo ++ mktemp + local LAST_ERR=/tmp/tmp.EGn1Ka4E5l + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc cross-site-replica 
--type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "source_to_replica", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "34.118.229.201", "port": 3306, "weight": 100},{"host": "34.118.233.218", "port": 3306, "weight": 100},{"host": "34.118.226.72", "port": 3306, "weight": 100}]}]}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Yq0Tg69PRo perconaxtradbcluster.pxc.percona.com/cross-site-replica patched + cat /tmp/tmp.EGn1Ka4E5l + rm /tmp/tmp.Yq0Tg69PRo /tmp/tmp.EGn1Ka4E5l + return 0 + sleep 40 + desc 'patch replica cluster secrets with replication user' + set +o xtrace ----------------------------------------------------------------------------------- patch replica cluster secrets with replication user ----------------------------------------------------------------------------------- ++ echo -n new_password ++ base64 + kubectl_bin patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.Rf1HDra8Mj ++ mktemp + local LAST_ERR=/tmp/tmp.bpBFyQxyXD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Rf1HDra8Mj secret/my-cluster-secrets patched + cat /tmp/tmp.bpBFyQxyXD + rm /tmp/tmp.Rf1HDra8Mj /tmp/tmp.bpBFyQxyXD + return 0 + sleep 15 + wait_cluster_consistency cross-site-replica 3 2 + local cluster_name=cross-site-replica + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/cross-site-replica to be ready' waiting for pxc/cross-site-replica to be ready++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ONNQkml5c9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.a68F0imVYU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ONNQkml5c9 ++ cat /tmp/tmp.a68F0imVYU ++ rm /tmp/tmp.ONNQkml5c9 /tmp/tmp.a68F0imVYU ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4PeexWEwNm +++ mktemp ++ local LAST_ERR=/tmp/tmp.YW4xZ9hgQV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4PeexWEwNm ++ cat /tmp/tmp.YW4xZ9hgQV ++ rm /tmp/tmp.4PeexWEwNm /tmp/tmp.YW4xZ9hgQV ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-replica +++ local cluster_name=cross-site-replica ++++ get_proxy cross-site-replica ++++ local target_cluster=cross-site-replica +++++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.jRpe1weTy5 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.MkDYQ6MOTK +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-replica -o 
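# The patch above points the replica's source_to_replica channel at the per-pod service
# IPs of cross-site-source collected earlier, with TLS validated against the copied
# ssl-internal CA. The same patch, reformatted for readability:
kubectl patch pxc cross-site-replica --type=merge --patch '{
  "spec": {"pxc": {"replicationChannels": [{
    "name": "source_to_replica",
    "isSource": false,
    "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"},
    "sourcesList": [
      {"host": "34.118.229.201", "port": 3306, "weight": 100},
      {"host": "34.118.233.218", "port": 3306, "weight": 100},
      {"host": "34.118.226.72", "port": 3306, "weight": 100}
    ]
  }]}}
}'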
'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.jRpe1weTy5 +++++ cat /tmp/tmp.MkDYQ6MOTK +++++ rm /tmp/tmp.jRpe1weTy5 /tmp/tmp.MkDYQ6MOTK +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-replica-haproxy ++++ return +++ local cluster_proxy=cross-site-replica-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tYY1nQnFlC +++ mktemp ++ local LAST_ERR=/tmp/tmp.oGQL7L4YLK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tYY1nQnFlC ++ cat /tmp/tmp.oGQL7L4YLK ++ rm /tmp/tmp.tYY1nQnFlC /tmp/tmp.oGQL7L4YLK ++ return 0 + [[ 2 == \2 ]] + echo + desc 'Check replication works between source -> replica' + set +o xtrace ----------------------------------------------------------------------------------- Check replication works between source -> replica ----------------------------------------------------------------------------------- + compare_mysql_cmd select-2 'SELECT * from myApp.testSourceReplica;' '-h 34.118.232.34 -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 34.118.232.34 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-2-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.testSourceReplica;' '-h 34.118.232.34 -uroot -proot_password' + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 34.118.232.34 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0IaCQyHjes +++ mktemp ++ local LAST_ERR=/tmp/tmp.hXsmfIKI66 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0IaCQyHjes ++ cat /tmp/tmp.hXsmfIKI66 ++ rm /tmp/tmp.0IaCQyHjes /tmp/tmp.hXsmfIKI66 ++ return 0 + client_pod=pxc-client-59944c5bbf-rpn2d + wait_pod pxc-client-59944c5bbf-rpn2d + local pod=pxc-client-59944c5bbf-rpn2d + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-rpn2d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-rpn2d condition met waiting for pod/pxc-client-59944c5bbf-rpn2d to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.j31CMwHlyC/select-2.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-2.sql /tmp/tmp.j31CMwHlyC/select-2.sql
--- /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/cross-site/compare/select-2.sql 2025-11-12 09:53:11.674180440 +0000
+++ /tmp/tmp.j31CMwHlyC/select-2.sql 2025-11-12 10:49:07.393527271 +0000
@@ -1 +1,2 @@
-100700
+ERROR 1146 (42S02) at line 1: Table 'myApp.testSourceReplica' doesn't exist
+command terminated with exit code 1
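# The diff above is the failing check of this run: the replica was expected to have
# received myApp.testSourceReplica (row 100700) over the source_to_replica channel, but
# the table does not exist on cross-site-replica-pxc-0 (34.118.232.34). The trace alone
# does not show why replication did not apply; one way to inspect the channel from the
# client pod, assuming the channel name from the patch above and a MySQL 8.0.22+ server:
kubectl exec pxc-client-59944c5bbf-rpn2d -c pxc-client -- \
  mysql -h 34.118.232.34 -uroot -proot_password \
  -e "SHOW REPLICA STATUS FOR CHANNEL 'source_to_replica'\G"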