Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/logs/cross-site-8-0.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ main
+ local source_cluster=cross-site-source
+ local replica_cluster=cross-site-replica
+ desc 'Create source cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
Create source cluster
-----------------------------------------------------------------------------------
+ create_infra cross-site-1974
+ local ns=cross-site-1974
+ '[' -n pxc-operator ']'
+ grep -v NAMESPACE
+ kubectl get pxc --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch pxc -n cross-site-16046 cross-site-source --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/cross-site-source patched
+ kubectl patch pxc -n cross-site-replica-19456 cross-site-replica --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/cross-site-replica patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.WgqsIEqopE
++ mktemp
+ local LAST_ERR=/tmp/tmp.FM4Gaw4XFN
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.WgqsIEqopE
perconaxtradbcluster.pxc.percona.com "cross-site-source" deleted
perconaxtradbcluster.pxc.percona.com "cross-site-replica" deleted
+ cat /tmp/tmp.FM4Gaw4XFN
+ rm /tmp/tmp.WgqsIEqopE /tmp/tmp.FM4Gaw4XFN
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.0WRHGFcFXo
++ mktemp
+ local LAST_ERR=/tmp/tmp.yDzU4Vqo3Z
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.0WRHGFcFXo
perconaxtradbclusterbackup.pxc.percona.com "backup-minio-source" deleted
perconaxtradbclusterbackup.pxc.percona.com "backup-minio-replica" deleted
+ cat /tmp/tmp.yDzU4Vqo3Z
+ rm /tmp/tmp.0WRHGFcFXo /tmp/tmp.yDzU4Vqo3Z
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.ySiGkT773u
++ mktemp
+ local LAST_ERR=/tmp/tmp.2QmGz5yJJz
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ySiGkT773u
perconaxtradbclusterrestore.pxc.percona.com "backup-minio" deleted
perconaxtradbclusterrestore.pxc.percona.com "backup-minio" deleted
+ cat /tmp/tmp.2QmGz5yJJz
+ rm /tmp/tmp.ySiGkT773u /tmp/tmp.2QmGz5yJJz
+ return 0
+ create_namespace pxc-operator
+ local namespace=pxc-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ tail -n1
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl api-resources
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pxc-operator
+ awk '{print$1}'
+ xargs kubectl delete ns
+ egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME'
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.kJw6JjztL1
+ local LAST_OUT=/tmp/tmp.TTw32RcN0R
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.4Y5LOL0I1V
+ local exit_status=0
+ local LAST_ERR=/tmp/tmp.VnIGV1HIv2
+ local exit_status=0
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pxc-operator
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.kJw6JjztL1
+ cat /tmp/tmp.4Y5LOL0I1V
+ rm /tmp/tmp.kJw6JjztL1 /tmp/tmp.4Y5LOL0I1V
+ return 0
namespace "cross-site-16046" deleted
namespace "cross-site-replica-19456" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.TTw32RcN0R
namespace "pxc-operator" deleted
+ cat /tmp/tmp.VnIGV1HIv2
+ rm /tmp/tmp.TTw32RcN0R /tmp/tmp.VnIGV1HIv2
+ return 0
+ wait_for_delete namespace/pxc-operator
+ local res=namespace/pxc-operator
+ echo -n 'namespace/pxc-operator - '
namespace/pxc-operator - 
+ set +o xtrace
Error from server (NotFound): namespaces "pxc-operator" not found
+ desc 'create namespace pxc-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.bEKRgt0ONi
++ mktemp
+ local LAST_ERR=/tmp/tmp.9o6Lwoi7H2
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.bEKRgt0ONi
namespace/pxc-operator created
+ cat /tmp/tmp.9o6Lwoi7H2
+ rm /tmp/tmp.bEKRgt0ONi /tmp/tmp.9o6Lwoi7H2
+ return 0
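Note: the mktemp/LAST_OUT/LAST_ERR churn above repeats for every kubectl call in this log. Pieced together from the trace, the kubectl_bin wrapper behaves roughly like the sketch below; the output redirections and the exact retry test are assumptions (on failure the trace also shows a '[' 1 == 1 ']' guard whose variable is not visible), while the temp files, the three attempts, and the cat/rm/return steps are read straight off the trace.

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)                 # captured stdout of the wrapped call
        LAST_ERR=$(mktemp)                 # captured stderr of the wrapped call
        for i in $(seq 0 2); do            # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # assumed redirection
            exit_status=$?
            set -e
            if [ $exit_status != 0 ]; then
                sleep 0                    # the trace shows no real back-off between attempts
            else
                break                      # success: stop retrying
            fi
        done
        cat "$LAST_OUT"                    # replay captured output into the log
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }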
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.h3vPQKq0aP
+++ mktemp
++ local LAST_ERR=/tmp/tmp.y2x3bBcSet
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.h3vPQKq0aP
++ cat /tmp/tmp.y2x3bBcSet
++ rm /tmp/tmp.h3vPQKq0aP /tmp/tmp.y2x3bBcSet
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=pxc-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.0kjxGmhtFd
++ mktemp
+ local LAST_ERR=/tmp/tmp.RW7FRNKuur
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=pxc-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.0kjxGmhtFd
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified.
+ cat /tmp/tmp.RW7FRNKuur
+ rm /tmp/tmp.0kjxGmhtFd /tmp/tmp.RW7FRNKuur
+ return 0
+ deploy_operator
+ desc 'start PXC operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7M4MMJUXcb
++ mktemp
+ local LAST_ERR=/tmp/tmp.Q51R9SHWja
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.7M4MMJUXcb
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
+ cat /tmp/tmp.Q51R9SHWja
+ rm /tmp/tmp.7M4MMJUXcb /tmp/tmp.Q51R9SHWja
+ return 0
+ '[' -n pxc-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=pxc-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: pxc-operator^'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.uqt5PDiIxU
++ mktemp
+ local LAST_ERR=/tmp/tmp.kFjAbv1zd6
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.uqt5PDiIxU
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
+ cat /tmp/tmp.kFjAbv1zd6
+ rm /tmp/tmp.uqt5PDiIxU /tmp/tmp.kFjAbv1zd6
+ return 0
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/cw-operator.yaml
+ sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9^'
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' -
+ sed -e 's^failureThreshold: .*^failureThreshold: 10^'
+ kubectl_bin apply -f -
+ yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.IYN8cRoZXY
++ mktemp
+ local LAST_ERR=/tmp/tmp.N8V9Ny5WSZ
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.IYN8cRoZXY
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
+ cat /tmp/tmp.N8V9Ny5WSZ
+ rm /tmp/tmp.IYN8cRoZXY /tmp/tmp.N8V9Ny5WSZ
+ return 0
+ sleep 10
+ kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
++ mktemp
+ local LAST_OUT=/tmp/tmp.sYCIBt6Bwy
++ mktemp
+ local LAST_ERR=/tmp/tmp.ATEDhQLnOf
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.sYCIBt6Bwy
pod/percona-xtradb-cluster-operator-7577b89745-bq2qg condition met
+ cat /tmp/tmp.ATEDhQLnOf
+ rm /tmp/tmp.sYCIBt6Bwy /tmp/tmp.ATEDhQLnOf
+ return 0
++ get_operator_pod
++ local label_prefix=app.kubernetes.io/
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
+++ grep -c percona-xtradb-cluster-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.SAcdJw99BW
+++ mktemp
++ local LAST_ERR=/tmp/tmp.aFG5Avr0G5
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.SAcdJw99BW
++ cat /tmp/tmp.aFG5Avr0G5
++ rm /tmp/tmp.SAcdJw99BW /tmp/tmp.aFG5Avr0G5
++ return 0
+ wait_pod percona-xtradb-cluster-operator-7577b89745-bq2qg 480 pxc-operator
+ local pod=percona-xtradb-cluster-operator-7577b89745-bq2qg
+ local max_retry=480
+ local ns=pxc-operator
++ echo percona-xtradb-cluster-operator-7577b89745-bq2qg
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/percona-xtradb-cluster-operator-7577b89745-bq2qg condition met
percona-xtradb-cluster-operator-7577b89745-bq2qg.Ok
+ sleep 3
+ create_namespace cross-site-1974
+ local namespace=cross-site-1974
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ '[' -n '' ']'
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get MutatingWebhookConfiguration
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
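Note: the repeated 'error: resource(s) were provided, but no name was specified' lines in destroy_chaos_mesh are expected rather than failures. When the grep finds no chaos-mesh objects, kubectl delete is invoked with an empty name list and exits non-zero, and the trailing '+ :' no-op swallows that status. Each cleanup step has the shape sketched below; the command-substitution wrapping is an assumption, the pipeline itself is copied from the trace.

    timeout 30 kubectl delete MutatingWebhookConfiguration \
        $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :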
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrole
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces cross-site-1974'
+ set +o xtrace
++ mktemp
-----------------------------------------------------------------------------------
cleaned up old namespaces cross-site-1974
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace cross-site-1974
++ mktemp
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.zWTFImRDrQ
+ local LAST_OUT=/tmp/tmp.ALNP2LmOTV
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.OIXdsbDPnQ
+ local exit_status=0
+ local LAST_ERR=/tmp/tmp.KmxhJFd4Kl
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace cross-site-1974
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME'
+ awk '{print$1}'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.ALNP2LmOTV
+ cat /tmp/tmp.KmxhJFd4Kl
+ rm /tmp/tmp.ALNP2LmOTV /tmp/tmp.KmxhJFd4Kl
+ return 0
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace cross-site-1974
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace cross-site-1974
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.zWTFImRDrQ
+ cat /tmp/tmp.OIXdsbDPnQ
Error from server (NotFound): namespaces "cross-site-1974" not found
+ rm /tmp/tmp.zWTFImRDrQ /tmp/tmp.OIXdsbDPnQ
+ return 1
+ :
+ wait_for_delete namespace/cross-site-1974
+ local res=namespace/cross-site-1974
+ echo -n 'namespace/cross-site-1974 - '
namespace/cross-site-1974 - 
+ set +o xtrace
Error from server (NotFound): namespaces "cross-site-1974" not found
+ desc 'create namespace cross-site-1974'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace cross-site-1974
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cross-site-1974
++ mktemp
+ local LAST_OUT=/tmp/tmp.dWUl7QB665
++ mktemp
+ local LAST_ERR=/tmp/tmp.8mcbnL5APt
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace cross-site-1974
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.dWUl7QB665
namespace/cross-site-1974 created
+ cat /tmp/tmp.8mcbnL5APt
+ rm /tmp/tmp.dWUl7QB665 /tmp/tmp.8mcbnL5APt
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.dklEncHRsA
+++ mktemp
++ local LAST_ERR=/tmp/tmp.rRiFlRgnXp
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.dklEncHRsA
++ cat /tmp/tmp.rRiFlRgnXp
++ rm /tmp/tmp.dklEncHRsA /tmp/tmp.rRiFlRgnXp
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974
++ mktemp
+ local LAST_OUT=/tmp/tmp.tdrH5Xohw2
++ mktemp
+ local LAST_ERR=/tmp/tmp.8YKXhS9rs2
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.tdrH5Xohw2
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified.
+ cat /tmp/tmp.8YKXhS9rs2
+ rm /tmp/tmp.tdrH5Xohw2 /tmp/tmp.8YKXhS9rs2
+ return 0
+ apply_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Mw7c9IGZL5
++ mktemp
+ local LAST_ERR=/tmp/tmp.jl1VoJTTI4
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.Mw7c9IGZL5
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.jl1VoJTTI4
+ rm /tmp/tmp.Mw7c9IGZL5 /tmp/tmp.jl1VoJTTI4
+ return 0
+ start_minio
+ deploy_helm cross-site-1974
+ helm repo add hashicorp https://helm.releases.hashicorp.com
"hashicorp" already exists with the same configuration, skipping
+ helm repo add minio https://charts.min.io/
"minio" already exists with the same configuration, skipping
+ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "minio" chart repository
...Successfully got an update from the "hashicorp" chart repository
...Successfully got an update from the "chaos-mesh" chart repository
...Successfully got an update from the "percona" chart repository
Update Complete. ⎈Happy Helming!⎈
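A note on the namespace cleanup earlier in this step: 'kubectl delete namespace' runs concurrently with a bulk 'get ns | egrep -v | awk | xargs kubectl delete ns' pipeline, which is why those trace lines interleave. De-interleaved, the bulk branch is the pipeline below, copied from the trace; the egrep filter is what keeps system namespaces out of the bulk delete, and the 'namespaces "default" is forbidden' error that still appears is tolerated by the surrounding retry and '+ :' handling.

    kubectl_bin get ns \
        | egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' \
        | awk '{print$1}' \
        | xargs kubectl delete ns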
+ local cert_secret=
+ local endpoint=http://minio-service:9000
+ minio_args=(--version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G)
+ local minio_args
+ [[ -n '' ]]
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio
NAME: minio-service
LAST DEPLOYED: Thu Jul 4 14:46:43 2024
NAMESPACE: cross-site-1974
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.cross-site-1974.svc.cluster.local
To access MinIO from localhost, run the below commands:
1. export POD_NAME=$(kubectl get pods --namespace cross-site-1974 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace cross-site-1974
Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace cross-site-1974 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace cross-site-1974 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
3. mc ls minio-service-local
+ sleep 30
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.UkVvq8qwJ9
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KGekwNeW1v
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.UkVvq8qwJ9
++ cat /tmp/tmp.KGekwNeW1v
++ rm /tmp/tmp.UkVvq8qwJ9 /tmp/tmp.KGekwNeW1v
++ return 0
+ MINIO_POD=minio-service-76ffcfd45-j62bv
+ wait_pod minio-service-76ffcfd45-j62bv
+ local pod=minio-service-76ffcfd45-j62bv
+ local max_retry=480
+ local ns=
++ echo minio-service-76ffcfd45-j62bv
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/minio-service-76ffcfd45-j62bv condition met
minio-service-76ffcfd45-j62bv.Ok
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing
++ mktemp
+ local LAST_OUT=/tmp/tmp.LlazXVuihV
++ mktemp
+ local LAST_ERR=/tmp/tmp.rTort9jrEV
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.LlazXVuihV
make_bucket: operator-testing
pod "aws-cli" deleted
+ cat /tmp/tmp.rTort9jrEV
If you don't see a command prompt, try pressing enter.
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: unable to upgrade connection: container aws-cli not found in pod aws-cli_cross-site-1974
+ rm /tmp/tmp.LlazXVuihV /tmp/tmp.rTort9jrEV
+ return 0
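Only the first lines of the retry helper appear in the trace: max=10, delay=60, shift 2, n=1, then the helm install attempt, which succeeds immediately. A plausible reconstruction of the loop, with everything beyond those traced lines an assumption, is:

    retry() {
        local max=$1
        local delay=$2
        shift 2                    # the remaining arguments are the command to run
        local n=1
        until "$@"; do             # assumed loop; only the first attempt is traced
            [ "$n" -ge "$max" ] && return 1
            sleep "$delay"
            n=$((n + 1))
        done
    }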
+ apply_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.avXbOn3QtO
++ mktemp
+ local LAST_ERR=/tmp/tmp.2IZf651z2t
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.avXbOn3QtO
secret/minio-secret unchanged
secret/aws-s3-secret unchanged
secret/gcp-cs-secret unchanged
secret/azure-secret unchanged
+ cat /tmp/tmp.2IZf651z2t
+ rm /tmp/tmp.avXbOn3QtO /tmp/tmp.2IZf651z2t
+ return 0
+ spinup_pxc cross-site-source /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-source.yml
+ local cluster=cross-site-source
+ local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-source.yml
+ local size=3
+ local sleep=10
+ local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml
+ local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml
+ local port=3306
+ desc 'create first PXC cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PXC cluster
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.VWZXd6IP96
++ mktemp
+ local LAST_ERR=/tmp/tmp.juIupiqmo5
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.VWZXd6IP96
secret/my-cluster-secrets created
secret/some-name-ssl created
secret/some-name-ssl-internal created
+ cat /tmp/tmp.juIupiqmo5
+ rm /tmp/tmp.VWZXd6IP96 /tmp/tmp.juIupiqmo5
+ return 0
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9#'
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
++ mktemp
+ local LAST_OUT=/tmp/tmp.s8S9Od5svs
++ mktemp
+ local LAST_ERR=/tmp/tmp.hLIRIb0XSZ
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.s8S9Od5svs
deployment.apps/pxc-client created
+ cat /tmp/tmp.hLIRIb0XSZ
+ rm /tmp/tmp.s8S9Od5svs /tmp/tmp.hLIRIb0XSZ
+ return 0
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-source.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-source.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-source.yml
+ /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
++ mktemp
+ /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#'
+ /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9#'
+ /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#'
+ local LAST_OUT=/tmp/tmp.O6paZtFarn
+ /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#'
+ /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~
+ /usr/bin/sed -e 's#apply:.*#apply: Never#'
+ /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#'
+ /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#'
+ /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#'
++ mktemp
+ local LAST_ERR=/tmp/tmp.Z0oBLzSoM2
+ local exit_status=0
++ seq 0 2
+ /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#'
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.O6paZtFarn
perconaxtradbcluster.pxc.percona.com/cross-site-source created
+ cat /tmp/tmp.Z0oBLzSoM2
+ rm /tmp/tmp.O6paZtFarn /tmp/tmp.Z0oBLzSoM2
+ return 0
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
++ get_proxy cross-site-source
++ local target_cluster=cross-site-source
+++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.N7Sn2sCEx8
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.T9heMcVu1n
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.N7Sn2sCEx8
+++ cat /tmp/tmp.T9heMcVu1n
+++ rm /tmp/tmp.N7Sn2sCEx8 /tmp/tmp.T9heMcVu1n
+++ return 0
++ [[ true == \t\r\u\e ]]
++ echo cross-site-source-haproxy
++ return
+ local proxy=cross-site-source-haproxy
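The xtrace interleaving above makes the cat_config pipeline hard to read. De-interleaved, it is a single sed chain that pins every image referenced by the manifest to this PR's test builds before piping the result to kubectl apply. Every expression below is copied from the trace; the stage order is not significant since each sed matches a different line, and $config_file is a stand-in for the manifest path.

    cat "$config_file" \
        | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' \
        | /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9#' \
        | /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' \
        | /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
        | /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' \
        | /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' \
        | /usr/bin/sed -e 's~minio-service.#namespace~minio-service.cross-site-1974~' \
        | /usr/bin/sed -e 's#apply:.*#apply: Never#' \
        | kubectl apply -f -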
+ kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974
++ mktemp
+ local LAST_OUT=/tmp/tmp.UlfJ2qPbHf
++ mktemp
+ local LAST_ERR=/tmp/tmp.cFwJSUOWmp
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 ']'
+ '[' 1 == 1 ']'
+ sleep 0
+ cat /tmp/tmp.UlfJ2qPbHf
+ cat /tmp/tmp.cFwJSUOWmp
error: no matching resources found
+ rm /tmp/tmp.UlfJ2qPbHf /tmp/tmp.cFwJSUOWmp
+ return 1
+ true
+ wait_for_running cross-site-source-haproxy 1
+ local name=cross-site-source-haproxy
+ let last_pod=0
+ :
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ wait_pod cross-site-source-haproxy-0 480
+ local pod=cross-site-source-haproxy-0
+ local max_retry=480
+ local ns=
++ echo cross-site-source-haproxy-0
++ egrep '^(pxc|proxysql)$'
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+ local container=
+ set +o xtrace
Error from server (NotFound): pods "cross-site-source-haproxy-0" not found
cross-site-source-haproxy-0.....................................Defaulted container "haproxy" out of: haproxy, pxc-monit, pxc-init (init) .Ok
+ wait_for_running cross-site-source-pxc 3
+ local name=cross-site-source-pxc
+ let last_pod=2
+ local max_retry=480
+ desc 'wait for running cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ wait_pod cross-site-source-pxc-0 480
+ local pod=cross-site-source-pxc-0
+ local max_retry=480
+ local ns=
++ echo cross-site-source-pxc-0
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/cross-site-source-pxc-0 condition met
cross-site-source-pxc-0.Ok
+ for i in '$(seq 0 $last_pod)'
+ wait_pod cross-site-source-pxc-1 480
+ local pod=cross-site-source-pxc-1
+ local max_retry=480
+ local ns=
++ echo cross-site-source-pxc-1
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/cross-site-source-pxc-1 condition met
cross-site-source-pxc-1.Ok
+ for i in '$(seq 0 $last_pod)'
+ wait_pod cross-site-source-pxc-2 480
+ local pod=cross-site-source-pxc-2
+ local max_retry=480
+ local ns=
++ echo cross-site-source-pxc-2
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=pxc
+ set +o xtrace
pod/cross-site-source-pxc-2 condition met
cross-site-source-pxc-2.Ok
+ sleep 10
+ desc 'write data'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data
-----------------------------------------------------------------------------------
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]]
+ run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h cross-site-source-haproxy -uroot -proot_password -P3306'
+ local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;'
+ local 'uri=-h cross-site-source-haproxy -uroot -proot_password -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.z9Yvjw7TgG
+++ mktemp
++ local LAST_ERR=/tmp/tmp.yc9XxvuLYM
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.z9Yvjw7TgG
++ cat /tmp/tmp.yc9XxvuLYM
++ rm /tmp/tmp.z9Yvjw7TgG /tmp/tmp.yc9XxvuLYM
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h cross-site-source-haproxy -uroot -proot_password -P3306'
+ local 'command=INSERT myApp.myApp (id) VALUES (100500)'
+ local 'uri=-h cross-site-source-haproxy -uroot -proot_password -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xBPR2ewnoM
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3OXhHPBPlG
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.xBPR2ewnoM
++ cat /tmp/tmp.3OXhHPBPlG
++ rm /tmp/tmp.xBPR2ewnoM /tmp/tmp.3OXhHPBPlG
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ sleep 30
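A recurring idiom in the wait_pod blocks above: the container to probe is derived from the pod name itself. The one-liner below restates it; the '|| true' guard is an assumption added to model the empty result seen for client, operator, minio and haproxy pods, where the default container is used instead.

    container=$(echo "$pod" | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | egrep '^(pxc|proxysql)$' || true)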
++ seq 0 2
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -proot_password -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -proot_password -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']'
+ run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -proot_password -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-0.cross-site-source-pxc -uroot -proot_password -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.YTUw6awQCG
+++ mktemp
++ local LAST_ERR=/tmp/tmp.DKK37jNmwh
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.YTUw6awQCG
++ cat /tmp/tmp.DKK37jNmwh
++ rm /tmp/tmp.YTUw6awQCG /tmp/tmp.DKK37jNmwh
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.7Me1pw3ROq/select-1.sql ']'
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -proot_password -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -proot_password -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']'
+ run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -proot_password -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-1.cross-site-source-pxc -uroot -proot_password -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.LRBo7OsBne
+++ mktemp
++ local LAST_ERR=/tmp/tmp.WFHHibMVCn
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.LRBo7OsBne
++ cat /tmp/tmp.WFHHibMVCn
++ rm /tmp/tmp.LRBo7OsBne /tmp/tmp.WFHHibMVCn
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.7Me1pw3ROq/select-1.sql ']'
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql
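The compare_mysql_cmd blocks above all follow one flow, reconstructed here as a sketch. The redirection into the tmp dir and the names IMAGE_PXC, tests_dir and tmp_dir are assumptions; the -80.sql fallback check, the non-empty test and the diff are taken from the trace.

    compare_mysql_cmd() {
        local command_id=$1 command=$2 uri=$3
        local expected=$tests_dir/cross-site/compare/${command_id}.sql
        # prefer a version-specific golden file when running an 8.0 image
        if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f "$tests_dir/cross-site/compare/${command_id}-80.sql" ]; then
            expected=$tests_dir/cross-site/compare/${command_id}-80.sql
        fi
        run_mysql "$command" "$uri" >"$tmp_dir/${command_id}.sql"   # assumed redirection
        [ ! -s "$tmp_dir/${command_id}.sql" ] && exit 1             # assumed handling of an empty result
        diff -u "$expected" "$tmp_dir/${command_id}.sql"            # fail the test on any mismatch
    }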
+ for i in '$(seq 0 $((size - 1)))'
+ compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -proot_password -P3306'
+ local command_id=select-1
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -proot_password -P3306'
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]]
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']'
+ run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -proot_password -P3306'
+ local 'command=SELECT * from myApp.myApp;'
+ local 'uri=-h cross-site-source-pxc-2.cross-site-source-pxc -uroot -proot_password -P3306'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vt9j8mz56e
+++ mktemp
++ local LAST_ERR=/tmp/tmp.abW9xyQVUT
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.vt9j8mz56e
++ cat /tmp/tmp.abW9xyQVUT
++ rm /tmp/tmp.vt9j8mz56e /tmp/tmp.abW9xyQVUT
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ '[' '!' -s /tmp/tmp.7Me1pw3ROq/select-1.sql ']'
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql
++ is_keyring_plugin_in_use cross-site-source
++ local cluster=cross-site-source
++ kubectl_bin exec -it cross-site-source-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
++ egrep -o 'early-plugin-load=keyring_\w+.so'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.e5DlDtisRR
+++ mktemp
++ local LAST_ERR=/tmp/tmp.qolPXVPNOB
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec -it cross-site-source-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.e5DlDtisRR
++ cat /tmp/tmp.qolPXVPNOB
Unable to use a TTY - input is not a terminal or the right kind of file
++ rm /tmp/tmp.e5DlDtisRR /tmp/tmp.qolPXVPNOB
++ return 0
+ '[' '' ']'
+ sleep 60
+ desc 'get main cluster services endpoints'
+ set +o xtrace
-----------------------------------------------------------------------------------
get main cluster services endpoints
-----------------------------------------------------------------------------------
++ get_service_ip cross-site-source-pxc-0
++ local service=cross-site-source-pxc-0
++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
++ grep -q NotFound
+++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.mKUHdmy2Ta
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.nw07VisqVd
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.mKUHdmy2Ta
+++ cat /tmp/tmp.nw07VisqVd
+++ rm /tmp/tmp.mKUHdmy2Ta /tmp/tmp.nw07VisqVd
+++ return 0
++ '[' ClusterIP = ClusterIP ']'
++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.X0Lo7UKLsC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.XRJWkNGUjW
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.X0Lo7UKLsC
++ cat /tmp/tmp.XRJWkNGUjW
++ rm /tmp/tmp.X0Lo7UKLsC /tmp/tmp.XRJWkNGUjW
++ return 0
++ return
+ source_endpoint0=10.215.26.231
++ get_service_ip cross-site-source-pxc-1
++ local service=cross-site-source-pxc-1
++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}'
++ grep -q NotFound
+++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.1DeElSvysE
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.FXAM8YLu0h
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.type}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.1DeElSvysE
+++ cat /tmp/tmp.FXAM8YLu0h
+++ rm /tmp/tmp.1DeElSvysE /tmp/tmp.FXAM8YLu0h
+++ return 0
++ '[' ClusterIP = ClusterIP ']'
++ kubectl_bin get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.clusterIP}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ItDuHRevc2
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bnhqOA5r1f
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get service/cross-site-source-pxc-1 -o 'jsonpath={.spec.clusterIP}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.ItDuHRevc2
++ cat /tmp/tmp.bnhqOA5r1f
++ rm /tmp/tmp.ItDuHRevc2 /tmp/tmp.bnhqOA5r1f
++ return 0
++ return
+ source_endpoint1=10.215.23.90
++ get_service_ip cross-site-source-pxc-2
++ local service=cross-site-source-pxc-2
++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}'
++ grep -q NotFound
+++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.Wq223ESpOc
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.CSZ8nGFrrh
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.type}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.Wq223ESpOc
+++ cat /tmp/tmp.CSZ8nGFrrh
+++ rm /tmp/tmp.Wq223ESpOc /tmp/tmp.CSZ8nGFrrh
+++ return 0
++ '[' ClusterIP = ClusterIP ']'
++ kubectl_bin get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.clusterIP}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.P03bwdpaAp
+++ mktemp
++ local LAST_ERR=/tmp/tmp.I05cixT7UB
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get service/cross-site-source-pxc-2 -o 'jsonpath={.spec.clusterIP}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.P03bwdpaAp
++ cat /tmp/tmp.I05cixT7UB
++ rm /tmp/tmp.P03bwdpaAp /tmp/tmp.I05cixT7UB
++ return 0
++ return
+ source_endpoint2=10.215.30.96
++ run_mysql 'SELECT @@hostname hostname;' '-h cross-site-source-haproxy -p33062 -uroot -proot_password'
++ local 'command=SELECT @@hostname hostname;'
++ local 'uri=-h cross-site-source-haproxy -p33062 -uroot -proot_password'
+++ get_client_pod
+++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.ixoPgrvgPM
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.YPSal1OI4r
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.ixoPgrvgPM
+++ cat /tmp/tmp.YPSal1OI4r
+++ rm /tmp/tmp.ixoPgrvgPM /tmp/tmp.YPSal1OI4r
+++ return 0
++ client_pod=pxc-client-6644d8898f-lkk47
++ wait_pod pxc-client-6644d8898f-lkk47
++ local pod=pxc-client-6644d8898f-lkk47
++ local max_retry=480
++ local ns=
+++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
+++ egrep '^(pxc|proxysql)$'
+++ echo pxc-client-6644d8898f-lkk47
++ local container=
++ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
++ set +o xtrace
+ source_primary=cross-site-source-pxc-0
++ get_service_ip cross-site-source-pxc-0
++ local service=cross-site-source-pxc-0
++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
++ grep -q NotFound
+++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.dSXLdgn0pT
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.gOzPk4lX9I
+++ local exit_status=0
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.type}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 ']'
+++ break
+++ cat /tmp/tmp.dSXLdgn0pT
+++ cat /tmp/tmp.gOzPk4lX9I
+++ rm /tmp/tmp.dSXLdgn0pT /tmp/tmp.gOzPk4lX9I
+++ return 0
++ '[' ClusterIP = ClusterIP ']'
++ kubectl_bin get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.k0PkOID2Xz
+++ mktemp
++ local LAST_ERR=/tmp/tmp.FIPXyeMC5P
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get service/cross-site-source-pxc-0 -o 'jsonpath={.spec.clusterIP}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.k0PkOID2Xz
++ cat /tmp/tmp.FIPXyeMC5P
++ rm /tmp/tmp.k0PkOID2Xz /tmp/tmp.FIPXyeMC5P
++ return 0
++ return
+ source_primary_endpoint=10.215.26.231
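The get_service_ip calls above all take the same path. Reconstructed from the trace, with the exact wiring of the NotFound guard an assumption and only the ClusterIP branch actually exercised in this run:

    get_service_ip() {
        local service=$1
        # a missing service short-circuits the lookup (assumed wiring of the grep -q NotFound guard)
        if kubectl_bin get "service/$service" -o 'jsonpath={.spec.type}' 2>&1 | grep -q NotFound; then
            return
        fi
        # NodePort/LoadBalancer handling, if any, is never reached in this log
        if [ "$(kubectl_bin get "service/$service" -o 'jsonpath={.spec.type}')" = 'ClusterIP' ]; then
            kubectl_bin get "service/$service" -o 'jsonpath={.spec.clusterIP}'
            return
        fi
    }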
+ desc 'patch source cluster with replicationChannels settings'
+ set +o xtrace
-----------------------------------------------------------------------------------
patch source cluster with replicationChannels settings
-----------------------------------------------------------------------------------
+ kubectl_bin patch pxc cross-site-source --type=merge --patch '{"spec": {"pxc":{"replicationChannels": [{"name":"source_to_replica", "isSource": true}]}}}'
++ mktemp
+ local LAST_OUT=/tmp/tmp.vUaS22YH4D
++ mktemp
+ local LAST_ERR=/tmp/tmp.U2loFctvv0
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl patch pxc cross-site-source --type=merge --patch '{"spec": {"pxc":{"replicationChannels": [{"name":"source_to_replica", "isSource": true}]}}}'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.vUaS22YH4D
perconaxtradbcluster.pxc.percona.com/cross-site-source patched
+ cat /tmp/tmp.U2loFctvv0
+ rm /tmp/tmp.vUaS22YH4D /tmp/tmp.U2loFctvv0
+ return 0
+ desc 'patch main cluster secrets with replication user'
+ set +o xtrace
-----------------------------------------------------------------------------------
patch main cluster secrets with replication user
-----------------------------------------------------------------------------------
++ echo -n new_password
++ base64
+ kubectl_bin patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}'
++ mktemp
+ local LAST_OUT=/tmp/tmp.94VbW2LfNU
++ mktemp
+ local LAST_ERR=/tmp/tmp.QIiMIr0lN8
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.94VbW2LfNU
secret/my-cluster-secrets patched
+ cat /tmp/tmp.QIiMIr0lN8
+ rm /tmp/tmp.94VbW2LfNU /tmp/tmp.QIiMIr0lN8
+ return 0
+ sleep 15
+ wait_cluster_consistency cross-site-source 3 2
+ local cluster_name=cross-site-source
+ local cluster_size=3
+ local proxy_size=2
+ '[' -z 2 ']'
+ desc 'wait cluster consistency'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait cluster consistency
-----------------------------------------------------------------------------------
+ local i=0
+ local max=36
+ sleep 7
++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vziuI7CGpw
+++ mktemp
++ local LAST_ERR=/tmp/tmp.BDy8KPpOZZ
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.vziuI7CGpw
++ cat /tmp/tmp.BDy8KPpOZZ
++ rm /tmp/tmp.vziuI7CGpw /tmp/tmp.BDy8KPpOZZ
++ return 0
+ [[ ready == \r\e\a\d\y ]]
++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.9LCp5SOxyn
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ZphvXZ8ohi
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.9LCp5SOxyn
++ cat /tmp/tmp.ZphvXZ8ohi
++ rm /tmp/tmp.9LCp5SOxyn /tmp/tmp.ZphvXZ8ohi
++ return 0
+ [[ 3 == \3 ]]
+++ get_proxy_engine cross-site-source
+++ local cluster_name=cross-site-source
++++ get_proxy cross-site-source
++++ local target_cluster=cross-site-source
+++++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}'
++++++ mktemp
+++++ local LAST_OUT=/tmp/tmp.2nLRhNSlMN
++++++ mktemp
+++++ local LAST_ERR=/tmp/tmp.gTSeHtMYFN
+++++ local exit_status=0
++++++ seq 0 2
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}'
+++++ exit_status=0
+++++ set -e
+++++ '[' 0 '!=' 0 ']'
+++++ break
+++++ cat /tmp/tmp.2nLRhNSlMN
+++++ cat /tmp/tmp.gTSeHtMYFN
+++++ rm /tmp/tmp.2nLRhNSlMN /tmp/tmp.gTSeHtMYFN
+++++ return 0
++++ [[ true == \t\r\u\e ]]
++++ echo cross-site-source-haproxy
++++ return
+++ local cluster_proxy=cross-site-source-haproxy
+++ echo haproxy
++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.54LYb0WwaB
+++ mktemp
++ local LAST_ERR=/tmp/tmp.RrVNraVP8T
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.54LYb0WwaB
++ cat /tmp/tmp.RrVNraVP8T
++ rm /tmp/tmp.54LYb0WwaB /tmp/tmp.RrVNraVP8T
++ return 0
+ [[ 2 == \2 ]]
+ desc 'write data to source cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data to source cluster
-----------------------------------------------------------------------------------
+ run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testSourceReplica (id int PRIMARY KEY);' '-h 10.215.26.231 -uroot -proot_password'
+ local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testSourceReplica (id int PRIMARY KEY);'
+ local 'uri=-h 10.215.26.231 -uroot -proot_password'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xI0QS26JIA
+++ mktemp
++ local LAST_ERR=/tmp/tmp.BQcEVXUHCA
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.xI0QS26JIA
++ cat /tmp/tmp.BQcEVXUHCA
++ rm /tmp/tmp.xI0QS26JIA /tmp/tmp.BQcEVXUHCA
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
+ run_mysql 'INSERT myApp.testSourceReplica (id) VALUES (100700)' '-h 10.215.26.231 -uroot -proot_password'
+ local 'command=INSERT myApp.testSourceReplica (id) VALUES (100700)'
+ local 'uri=-h 10.215.26.231 -uroot -proot_password'
++ get_client_pod
++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.jyyOocGOzv
+++ mktemp
++ local LAST_ERR=/tmp/tmp.oB1hKGnLzm
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.jyyOocGOzv
++ cat /tmp/tmp.oB1hKGnLzm
++ rm /tmp/tmp.jyyOocGOzv /tmp/tmp.oB1hKGnLzm
++ return 0
+ client_pod=pxc-client-6644d8898f-lkk47
+ wait_pod pxc-client-6644d8898f-lkk47
+ local pod=pxc-client-6644d8898f-lkk47
+ local max_retry=480
+ local ns=
++ echo pxc-client-6644d8898f-lkk47
++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/'
++ egrep '^(pxc|proxysql)$'
+ local container=
+ set +o xtrace
pod/pxc-client-6644d8898f-lkk47 condition met
pxc-client-6644d8898f-lkk47.Ok
+ set +o xtrace
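wait_cluster_consistency passes on its first probe here, so the retry loop implied by i=0/max=36 never runs. The checks themselves are the three jsonpath probes visible in the trace; the loop around them is an assumed reconstruction.

    wait_cluster_consistency() {
        local cluster_name=$1 cluster_size=$2 proxy_size=$3
        local i=0 max=36
        sleep 7
        # assumed loop: in the real helper these probes repeat until ready or max attempts
        [[ $(kubectl_bin get pxc "$cluster_name" -o 'jsonpath={.status.state}') == ready ]] &&
        [[ $(kubectl_bin get pxc "$cluster_name" -o 'jsonpath={.status.pxc.ready}') == "$cluster_size" ]] &&
        [[ $(kubectl_bin get pxc "$cluster_name" -o 'jsonpath={.status.haproxy.ready}') == "$proxy_size" ]]
    }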
--selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jyyOocGOzv ++ cat /tmp/tmp.oB1hKGnLzm ++ rm /tmp/tmp.jyyOocGOzv /tmp/tmp.oB1hKGnLzm ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + kubectl_bin get secrets cross-site-source-ssl-internal -o yaml + yq 'del(.metadata)' - + yq '.metadata={"name": "cross-site-replica-ssl-internal"}' - ++ mktemp + local LAST_OUT=/tmp/tmp.oYCDkgAkU7 ++ mktemp + local LAST_ERR=/tmp/tmp.F37xcrxPr6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get secrets cross-site-source-ssl-internal -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oYCDkgAkU7 + cat /tmp/tmp.F37xcrxPr6 + rm /tmp/tmp.oYCDkgAkU7 /tmp/tmp.F37xcrxPr6 + return 0 + desc 'take backup of source cluster' + set +o xtrace ----------------------------------------------------------------------------------- take backup of source cluster ----------------------------------------------------------------------------------- + run_backup cross-site-source backup-minio-source + local cluster=cross-site-source + local backup1=backup-minio-source + desc 'make backup backup-minio-source' + set +o xtrace ----------------------------------------------------------------------------------- make backup backup-minio-source ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/backup-minio-source.yml ++ mktemp + local LAST_OUT=/tmp/tmp.kHM5MUMFkR ++ mktemp + local LAST_ERR=/tmp/tmp.IUhj2VFoBi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/backup-minio-source.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kHM5MUMFkR perconaxtradbclusterbackup.pxc.percona.com/backup-minio-source created + cat /tmp/tmp.IUhj2VFoBi + rm /tmp/tmp.kHM5MUMFkR /tmp/tmp.IUhj2VFoBi + return 0 + wait_backup backup-minio-source + local backup=backup-minio-source + local status=Succeeded + set +o xtrace backup-minio-source................Succeeded + desc 'create replica cluster' + set +o xtrace ----------------------------------------------------------------------------------- create replica cluster ----------------------------------------------------------------------------------- + create_namespace cross-site-replica-28611 0 + local namespace=cross-site-replica-28611 + local skip_clean_namespace=0 + [[ 1 == 1 ]] + [[ -z 0 ]] + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-replica-28611' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-replica-28611 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-replica-28611 ++ mktemp + local LAST_OUT=/tmp/tmp.mwhs6FmUyf ++ mktemp + local LAST_ERR=/tmp/tmp.q1ky7Au1p7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-28611 + 
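# The secret copy above lets both sites share one internal CA: dump the source
# cluster's ssl-internal Secret, drop its cluster-specific metadata, and rename
# it for the replica (it gets applied in the replica namespace further down).
# Equivalent one-liner (yq v4 syntax; output filename illustrative):
kubectl get secret cross-site-source-ssl-internal -o yaml \
  | yq 'del(.metadata) | .metadata={"name": "cross-site-replica-ssl-internal"}' \
  > replica-ssl-internal.yaml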
exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-28611 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace cross-site-replica-28611 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.mwhs6FmUyf + cat /tmp/tmp.q1ky7Au1p7 Error from server (NotFound): namespaces "cross-site-replica-28611" not found + rm /tmp/tmp.mwhs6FmUyf /tmp/tmp.q1ky7Au1p7 + return 1 + : + wait_for_delete namespace/cross-site-replica-28611 + local res=namespace/cross-site-replica-28611 + echo -n 'namespace/cross-site-replica-28611 - ' namespace/cross-site-replica-28611 - + set +o xtrace Error from server (NotFound): namespaces "cross-site-replica-28611" not found + desc 'create namespace cross-site-replica-28611' + set +o xtrace ----------------------------------------------------------------------------------- create namespace cross-site-replica-28611 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-replica-28611 ++ mktemp + local LAST_OUT=/tmp/tmp.CzCfsyI6xJ ++ mktemp + local LAST_ERR=/tmp/tmp.BcNh5F78sW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cross-site-replica-28611 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CzCfsyI6xJ namespace/cross-site-replica-28611 created + cat /tmp/tmp.BcNh5F78sW + rm /tmp/tmp.CzCfsyI6xJ /tmp/tmp.BcNh5F78sW + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.RVe7o2ih88 +++ mktemp ++ local LAST_ERR=/tmp/tmp.b4DGABGbSc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RVe7o2ih88 ++ cat /tmp/tmp.b4DGABGbSc ++ rm /tmp/tmp.RVe7o2ih88 /tmp/tmp.b4DGABGbSc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-replica-28611 ++ mktemp + local LAST_OUT=/tmp/tmp.tgsEL3qN26 ++ mktemp + local LAST_ERR=/tmp/tmp.2GLZ52DItB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-replica-28611 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tgsEL3qN26 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified. 
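# create_namespace above is deliberately idempotent: delete the namespace
# (the three NotFound errors are tolerated, hence "+ :"), wait for the deletion,
# recreate it, and pin the kubeconfig context to it. A compact equivalent:
kubectl delete namespace cross-site-replica-28611 --ignore-not-found --wait=true
kubectl create namespace cross-site-replica-28611
kubectl config set-context --current --namespace=cross-site-replica-28611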
+ cat /tmp/tmp.2GLZ52DItB + rm /tmp/tmp.tgsEL3qN26 /tmp/tmp.2GLZ52DItB + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.9VEhjaLv7f ++ mktemp + local LAST_ERR=/tmp/tmp.QoM4SjPNyJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9VEhjaLv7f customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.QoM4SjPNyJ + rm /tmp/tmp.9VEhjaLv7f /tmp/tmp.QoM4SjPNyJ + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + sed -e 's^namespace: .*^namespace: pxc-operator^' + local LAST_OUT=/tmp/tmp.qNQInTBNQq ++ mktemp + local LAST_ERR=/tmp/tmp.gFPo5mE5jW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qNQInTBNQq clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.gFPo5mE5jW + rm /tmp/tmp.qNQInTBNQq /tmp/tmp.gFPo5mE5jW + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9^' + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - ++ mktemp + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + local LAST_OUT=/tmp/tmp.XmkuXfNx3f ++ mktemp + local LAST_ERR=/tmp/tmp.bOX31dUQGX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XmkuXfNx3f deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.bOX31dUQGX + rm /tmp/tmp.XmkuXfNx3f /tmp/tmp.bOX31dUQGX + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.AH5Q2DOWoC ++ mktemp + local LAST_ERR=/tmp/tmp.VCgCwz1Pep + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l 
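# deploy_operator above installs the CRDs with server-side apply
# (--force-conflicts takes over fields owned by older field managers), applies
# the cluster-wide RBAC with the namespace rewritten by sed, and streams
# cw-operator.yaml through yq/sed to pin the image and env. The env override
# alone, as a sketch (yq v4; same expression the trace shows):
yq eval '(select(.kind == "Deployment").spec.template.spec.containers[]
  | select(.name == "percona-xtradb-cluster-operator").env[]
  | select(.name == "LOG_LEVEL").value) = "DEBUG"' deploy/cw-operator.yaml \
  | kubectl apply -f -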
app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.AH5Q2DOWoC + cat /tmp/tmp.VCgCwz1Pep error: timed out waiting for the condition on pods/percona-xtradb-cluster-operator-7577b89745-8b7rt + rm /tmp/tmp.AH5Q2DOWoC /tmp/tmp.VCgCwz1Pep + return 1 + true ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.MSc6u3OSi7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rUOlXjD2ju ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MSc6u3OSi7 ++ cat /tmp/tmp.rUOlXjD2ju ++ rm /tmp/tmp.MSc6u3OSi7 /tmp/tmp.rUOlXjD2ju ++ return 0 + wait_pod percona-xtradb-cluster-operator-7577b89745-bq2qg 480 pxc-operator + local pod=percona-xtradb-cluster-operator-7577b89745-bq2qg + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-7577b89745-bq2qg ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-7577b89745-bq2qg condition met percona-xtradb-cluster-operator-7577b89745-bq2qg.Ok + sleep 3 + kubectl_bin apply -f /tmp/tmp.7Me1pw3ROq/replica-ssl-internal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.mRjhTVV4B7 ++ mktemp + local LAST_ERR=/tmp/tmp.yT78yRH9aF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.7Me1pw3ROq/replica-ssl-internal.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mRjhTVV4B7 secret/cross-site-replica-ssl-internal created + cat /tmp/tmp.yT78yRH9aF + rm /tmp/tmp.mRjhTVV4B7 /tmp/tmp.yT78yRH9aF + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4D3Xx05H8w ++ mktemp + local LAST_ERR=/tmp/tmp.Bkpj5vSyky + local exit_status=0 ++ seq 0 2 + for i in 
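# Note the recovery above: the 30s kubectl wait expired against operator pod
# ...-7577b89745-8b7rt, the harness swallowed the failure ("+ true"), then
# re-resolved the live pod by label (now ...-7577b89745-bq2qg, the original
# apparently replaced during rollout) and waited on that one instead. The
# resolve-then-wait pattern, sketched:
pod=$(kubectl get pods -n pxc-operator \
  --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
  -o 'jsonpath={.items[].metadata.name}')
kubectl wait -n pxc-operator --for=condition=Ready "pod/$pod" --timeout=480s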
'$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4D3Xx05H8w secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.Bkpj5vSyky + rm /tmp/tmp.4D3Xx05H8w /tmp/tmp.Bkpj5vSyky + return 0 + spinup_pxc cross-site-replica /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-replica.yml + local cluster=cross-site-replica + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-replica.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.RL5ivMFO1r ++ mktemp + local LAST_ERR=/tmp/tmp.yawSE8zilE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RL5ivMFO1r secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.yawSE8zilE + rm /tmp/tmp.RL5ivMFO1r /tmp/tmp.yawSE8zilE + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yySsTlvKV9 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~ + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.vtecoP0CVd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yySsTlvKV9 deployment.apps/pxc-client created + cat 
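# cat_config above is the suite's templating step: the manifest is piped through
# a chain of sed substitutions that pin every image field to the images under
# test and point minio-service at the source namespace. Two representative
# substitutions (paths abbreviated; the full chain is visible in the trace):
sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
    -e 's~minio-service.#namespace~minio-service.cross-site-1974~' \
    cross-site-replica.yml | kubectl apply -f -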
/tmp/tmp.vtecoP0CVd + rm /tmp/tmp.yySsTlvKV9 /tmp/tmp.vtecoP0CVd + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-replica.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-replica.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/cross-site-replica.yml + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1745-2e9e79b9#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~ + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.uNk0POWvpZ ++ mktemp + local LAST_ERR=/tmp/tmp.G8rKd8KTzF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uNk0POWvpZ perconaxtradbcluster.pxc.percona.com/cross-site-replica created + cat /tmp/tmp.G8rKd8KTzF + rm /tmp/tmp.uNk0POWvpZ /tmp/tmp.G8rKd8KTzF + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy cross-site-replica ++ local target_cluster=cross-site-replica +++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.M32nzhvqLv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qXYlg3PwNV +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.M32nzhvqLv +++ cat /tmp/tmp.qXYlg3PwNV +++ rm /tmp/tmp.M32nzhvqLv /tmp/tmp.qXYlg3PwNV +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo cross-site-replica-haproxy ++ return + local proxy=cross-site-replica-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974 ++ mktemp + local LAST_OUT=/tmp/tmp.0ErR5nNfhp ++ mktemp + local LAST_ERR=/tmp/tmp.DaYxWOHdmt + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + 
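# get_proxy above decides which client-facing service to probe by reading the
# CR itself: .spec.haproxy.enabled == true means <cluster>-haproxy; otherwise
# the suite presumably falls back to the proxysql service. Sketch:
if [[ $(kubectl get pxc cross-site-replica \
    -o 'jsonpath={.spec.haproxy.enabled}') == "true" ]]; then
  proxy=cross-site-replica-haproxy
else
  proxy=cross-site-replica-proxysql
fi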
set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n cross-site-1974 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.0ErR5nNfhp + cat /tmp/tmp.DaYxWOHdmt error: no matching resources found + rm /tmp/tmp.0ErR5nNfhp /tmp/tmp.DaYxWOHdmt + return 1 + true + wait_for_running cross-site-replica-haproxy 1 + local name=cross-site-replica-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-haproxy-0 480 + local pod=cross-site-replica-haproxy-0 + local max_retry=480 + local ns= ++ echo cross-site-replica-haproxy-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/cross-site-replica-haproxy-0 condition met cross-site-replica-haproxy-0Defaulted container "haproxy" out of: haproxy, pxc-monit, pxc-init (init) .Ok + wait_for_running cross-site-replica-pxc 3 + local name=cross-site-replica-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-0 480 + local pod=cross-site-replica-pxc-0 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-0 condition met cross-site-replica-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-1 480 + local pod=cross-site-replica-pxc-1 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-1 condition met cross-site-replica-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod cross-site-replica-pxc-2 480 + local pod=cross-site-replica-pxc-2 + local max_retry=480 + local ns= ++ echo cross-site-replica-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/cross-site-replica-pxc-2 condition met cross-site-replica-pxc-2.Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h cross-site-replica-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT 
EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h cross-site-replica-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L4ZuLfkR4I +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ujvh74MCMn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.L4ZuLfkR4I ++ cat /tmp/tmp.Ujvh74MCMn ++ rm /tmp/tmp.L4ZuLfkR4I /tmp/tmp.Ujvh74MCMn ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h cross-site-replica-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h cross-site-replica-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.126iYkOnXl +++ mktemp ++ local LAST_ERR=/tmp/tmp.wR1BLvDsGW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.126iYkOnXl ++ cat /tmp/tmp.wR1BLvDsGW ++ rm /tmp/tmp.126iYkOnXl /tmp/tmp.wR1BLvDsGW ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-6644d8898f-z42ds + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mcMdilx70M +++ mktemp ++ local LAST_ERR=/tmp/tmp.FEAQpjdhIO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 ']' ++ break ++ cat /tmp/tmp.mcMdilx70M ++ cat /tmp/tmp.FEAQpjdhIO ++ rm /tmp/tmp.mcMdilx70M /tmp/tmp.FEAQpjdhIO ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.7Me1pw3ROq/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-1.cross-site-replica-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IxCy1FeHuL +++ mktemp ++ local LAST_ERR=/tmp/tmp.r2E7irEPVY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IxCy1FeHuL ++ cat /tmp/tmp.r2E7irEPVY ++ rm /tmp/tmp.IxCy1FeHuL /tmp/tmp.r2E7irEPVY ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h cross-site-replica-pxc-2.cross-site-replica-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZHrTTdsJL7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZwDV194SSZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZHrTTdsJL7 ++ cat /tmp/tmp.ZwDV194SSZ ++ rm /tmp/tmp.ZHrTTdsJL7 /tmp/tmp.ZwDV194SSZ ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' 
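# compare_mysql_cmd above is the assertion primitive used against all three
# replica pods: run the query, capture the rows to a temp file, and diff
# against a checked-in expectation (a select-1-80.sql variant would win for
# 8.0 images, but none exists here). Sketch, with $host standing in for each
# pod and paths abbreviated:
run_mysql 'SELECT * from myApp.myApp;' "-h $host -uroot -proot_password" \
  > /tmp/select-1.sql
diff -u e2e-tests/cross-site/compare/select-1.sql /tmp/select-1.sql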
-s /tmp/tmp.7Me1pw3ROq/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-1.sql /tmp/tmp.7Me1pw3ROq/select-1.sql ++ is_keyring_plugin_in_use cross-site-replica ++ local cluster=cross-site-replica ++ kubectl_bin exec -it cross-site-replica-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gCYf0FE0W4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hlYSmN47IR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it cross-site-replica-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gCYf0FE0W4 ++ cat /tmp/tmp.hlYSmN47IR Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.gCYf0FE0W4 /tmp/tmp.hlYSmN47IR ++ return 0 + '[' '' ']' + sleep 60 + desc 'restore backup from source cluster' + set +o xtrace ----------------------------------------------------------------------------------- restore backup from source cluster ----------------------------------------------------------------------------------- ++ kubectl_bin get -n cross-site-1974 pxc-backup backup-minio-source -o 'jsonpath={.status.destination}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SRpf0Kz8l8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zxm4A0Cz1E ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get -n cross-site-1974 pxc-backup backup-minio-source -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SRpf0Kz8l8 ++ cat /tmp/tmp.zxm4A0Cz1E ++ rm /tmp/tmp.SRpf0Kz8l8 /tmp/tmp.zxm4A0Cz1E ++ return 0 + destination=s3://operator-testing/cross-site-source-2024-07-04-14:55:01-full + /usr/bin/sed -e s~#cluster~cross-site-replica~ + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~ + /usr/bin/sed -e s~#destination~s3://operator-testing/cross-site-source-2024-07-04-14:55:01-full~ + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/restore-backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lQuhLcmKI4 ++ mktemp + local LAST_ERR=/tmp/tmp.Pf7gfc0ur8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lQuhLcmKI4 perconaxtradbclusterrestore.pxc.percona.com/backup-minio created + cat /tmp/tmp.Pf7gfc0ur8 + rm /tmp/tmp.lQuhLcmKI4 /tmp/tmp.Pf7gfc0ur8 + return 0 + wait_cluster_consistency cross-site-replica 3 2 + local cluster_name=cross-site-replica + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.23KrBLC2dO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ydMnWhMHaW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.23KrBLC2dO ++ cat /tmp/tmp.ydMnWhMHaW ++ rm /tmp/tmp.23KrBLC2dO /tmp/tmp.ydMnWhMHaW ++ return 0 + [[ stopping == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for 
cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TVYrOv9okR +++ mktemp ++ local LAST_ERR=/tmp/tmp.iptGI4gYgd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TVYrOv9okR ++ cat /tmp/tmp.iptGI4gYgd ++ rm /tmp/tmp.TVYrOv9okR /tmp/tmp.iptGI4gYgd ++ return 0 + [[ stopping == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SX1EPChljY +++ mktemp ++ local LAST_ERR=/tmp/tmp.0KdKLBNDpV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SX1EPChljY ++ cat /tmp/tmp.0KdKLBNDpV ++ rm /tmp/tmp.SX1EPChljY /tmp/tmp.0KdKLBNDpV ++ return 0 + [[ paused == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4GJamfUutO +++ mktemp ++ local LAST_ERR=/tmp/tmp.aCm5vqt4HY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4GJamfUutO ++ cat /tmp/tmp.aCm5vqt4HY ++ rm /tmp/tmp.4GJamfUutO /tmp/tmp.aCm5vqt4HY ++ return 0 + [[ paused == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.au0v92ki66 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zLocGUJtMQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.au0v92ki66 ++ cat /tmp/tmp.zLocGUJtMQ ++ rm /tmp/tmp.au0v92ki66 /tmp/tmp.zLocGUJtMQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rVNVLGCiV3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.a4ffOJeRN8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rVNVLGCiV3 ++ cat /tmp/tmp.a4ffOJeRN8 ++ rm /tmp/tmp.rVNVLGCiV3 /tmp/tmp.a4ffOJeRN8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PrSokwjbg8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8cn5aHN2i ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PrSokwjbg8 ++ cat /tmp/tmp.L8cn5aHN2i 
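# The loop above is watching the restore-driven state machine: once the
# pxc-restore CR (destination read from the backup's .status.destination) is
# applied, the operator takes the cluster through stopping -> paused ->
# initializing -> ready, and wait_cluster_consistency polls .status.state up
# to 36 times at 20s intervals. Condensed form of the poll:
until [[ $(kubectl get pxc cross-site-replica \
    -o 'jsonpath={.status.state}') == "ready" ]]; do
  echo 'waiting for cluster readiness'; sleep 20
done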
++ rm /tmp/tmp.PrSokwjbg8 /tmp/tmp.L8cn5aHN2i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pnRC65gShY +++ mktemp ++ local LAST_ERR=/tmp/tmp.9MMzF5BnqJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pnRC65gShY ++ cat /tmp/tmp.9MMzF5BnqJ ++ rm /tmp/tmp.pnRC65gShY /tmp/tmp.9MMzF5BnqJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eodCz4Vpwj +++ mktemp ++ local LAST_ERR=/tmp/tmp.hoZUc85ZFo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.eodCz4Vpwj ++ cat /tmp/tmp.hoZUc85ZFo ++ rm /tmp/tmp.eodCz4Vpwj /tmp/tmp.hoZUc85ZFo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 8 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KF604QBe1F +++ mktemp ++ local LAST_ERR=/tmp/tmp.fddIKWH1in ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KF604QBe1F ++ cat /tmp/tmp.fddIKWH1in ++ rm /tmp/tmp.KF604QBe1F /tmp/tmp.fddIKWH1in ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 9 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NNer9zozh5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.i0yJo0y78Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NNer9zozh5 ++ cat /tmp/tmp.i0yJo0y78Y ++ rm /tmp/tmp.NNer9zozh5 /tmp/tmp.i0yJo0y78Y ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 10 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rlHwltokVf +++ mktemp ++ local LAST_ERR=/tmp/tmp.d8VYAiAozA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rlHwltokVf ++ cat /tmp/tmp.d8VYAiAozA ++ rm /tmp/tmp.rlHwltokVf /tmp/tmp.d8VYAiAozA ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 11 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nGvxCLCuTt +++ mktemp ++ local LAST_ERR=/tmp/tmp.mm2CX898AH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get 
pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nGvxCLCuTt ++ cat /tmp/tmp.mm2CX898AH ++ rm /tmp/tmp.nGvxCLCuTt /tmp/tmp.mm2CX898AH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 12 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iAnJWyB3xi +++ mktemp ++ local LAST_ERR=/tmp/tmp.rjwmRAxsfI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iAnJWyB3xi ++ cat /tmp/tmp.rjwmRAxsfI ++ rm /tmp/tmp.iAnJWyB3xi /tmp/tmp.rjwmRAxsfI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 13 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WpDtnXabBp +++ mktemp ++ local LAST_ERR=/tmp/tmp.jMaKnD7WPv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WpDtnXabBp ++ cat /tmp/tmp.jMaKnD7WPv ++ rm /tmp/tmp.WpDtnXabBp /tmp/tmp.jMaKnD7WPv ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cunuI0mJwW +++ mktemp ++ local LAST_ERR=/tmp/tmp.FjMOfLGnte ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cunuI0mJwW ++ cat /tmp/tmp.FjMOfLGnte ++ rm /tmp/tmp.cunuI0mJwW /tmp/tmp.FjMOfLGnte ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-replica +++ local cluster_name=cross-site-replica ++++ get_proxy cross-site-replica ++++ local target_cluster=cross-site-replica +++++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.KKyIEs7qnK ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.wkNFGsqhno +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.KKyIEs7qnK +++++ cat /tmp/tmp.wkNFGsqhno +++++ rm /tmp/tmp.KKyIEs7qnK /tmp/tmp.wkNFGsqhno +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-replica-haproxy ++++ return +++ local cluster_proxy=cross-site-replica-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NBwbpzFrY6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4PKFVrNdPO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NBwbpzFrY6 ++ cat /tmp/tmp.4PKFVrNdPO ++ rm /tmp/tmp.NBwbpzFrY6 /tmp/tmp.4PKFVrNdPO ++ return 0 + [[ 2 == \2 ]] + desc 'get replica cluster services endpoints' + set +o xtrace ----------------------------------------------------------------------------------- get replica cluster services endpoints 
----------------------------------------------------------------------------------- ++ get_service_ip cross-site-replica-pxc-0 ++ local service=cross-site-replica-pxc-0 ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S7qYEsjqSj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uoRkiKJFB0 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.S7qYEsjqSj +++ cat /tmp/tmp.uoRkiKJFB0 +++ rm /tmp/tmp.S7qYEsjqSj /tmp/tmp.uoRkiKJFB0 +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CABibJVk7F +++ mktemp ++ local LAST_ERR=/tmp/tmp.famj4qTnke ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CABibJVk7F ++ cat /tmp/tmp.famj4qTnke ++ rm /tmp/tmp.CABibJVk7F /tmp/tmp.famj4qTnke ++ return 0 ++ return + replica_endpoint0=10.215.21.117 ++ get_service_ip cross-site-replica-pxc-1 ++ local service=cross-site-replica-pxc-1 ++ kubectl_bin get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.h1HI8ynpZo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kI9XROpV3u +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.h1HI8ynpZo +++ cat /tmp/tmp.kI9XROpV3u +++ rm /tmp/tmp.h1HI8ynpZo /tmp/tmp.kI9XROpV3u +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cvXpI2pwYy +++ mktemp ++ local LAST_ERR=/tmp/tmp.brYNzYp4Ja ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cvXpI2pwYy ++ cat /tmp/tmp.brYNzYp4Ja ++ rm /tmp/tmp.cvXpI2pwYy /tmp/tmp.brYNzYp4Ja ++ return 0 ++ return + replica_endpoint1=10.215.28.214 ++ get_service_ip cross-site-replica-pxc-2 ++ local service=cross-site-replica-pxc-2 ++ grep -q NotFound ++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LvEWq2f5eW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BVnFzuGi1S +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.LvEWq2f5eW +++ cat /tmp/tmp.BVnFzuGi1S +++ rm /tmp/tmp.LvEWq2f5eW /tmp/tmp.BVnFzuGi1S +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bbzcJjYl2A +++ mktemp ++ local LAST_ERR=/tmp/tmp.eFSIZabroj ++ local exit_status=0 
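# get_service_ip above guards on the Service type before reading the address:
# per-pod ClusterIP Services are what the source site will later dial, so a
# LoadBalancer would need different handling. Sketch (NotFound check elided):
svc=cross-site-replica-pxc-2
if [[ $(kubectl get "service/$svc" -o 'jsonpath={.spec.type}') == "ClusterIP" ]]; then
  kubectl get "service/$svc" -o 'jsonpath={.spec.clusterIP}'
fi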
+++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bbzcJjYl2A ++ cat /tmp/tmp.eFSIZabroj ++ rm /tmp/tmp.bbzcJjYl2A /tmp/tmp.eFSIZabroj ++ return 0 ++ return + replica_endpoint2=10.215.21.26 ++ run_mysql 'SELECT @@hostname hostname;' '-h cross-site-replica-haproxy -p33062 -uroot -proot_password' ++ local 'command=SELECT @@hostname hostname;' ++ local 'uri=-h cross-site-replica-haproxy -p33062 -uroot -proot_password' +++ get_client_pod +++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uoyV4gisMH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G1Q5XGHcDU +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uoyV4gisMH +++ cat /tmp/tmp.G1Q5XGHcDU +++ rm /tmp/tmp.uoyV4gisMH /tmp/tmp.G1Q5XGHcDU +++ return 0 ++ client_pod=pxc-client-6644d8898f-z42ds ++ wait_pod pxc-client-6644d8898f-z42ds ++ local pod=pxc-client-6644d8898f-z42ds ++ local max_retry=480 ++ local ns= +++ echo pxc-client-6644d8898f-z42ds +++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' +++ egrep '^(pxc|proxysql)$' ++ local container= ++ set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok ++ set +o xtrace + replica_primary=cross-site-replica-pxc-0 ++ get_service_ip cross-site-replica-pxc-0 ++ local service=cross-site-replica-pxc-0 ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H7FS0xWPC1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WzlK7qvwst +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.H7FS0xWPC1 +++ cat /tmp/tmp.WzlK7qvwst +++ rm /tmp/tmp.H7FS0xWPC1 /tmp/tmp.WzlK7qvwst +++ return 0 ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t40FaBsO0H +++ mktemp ++ local LAST_ERR=/tmp/tmp.XUuieFyC0l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cross-site-replica-pxc-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.t40FaBsO0H ++ cat /tmp/tmp.XUuieFyC0l ++ rm /tmp/tmp.t40FaBsO0H /tmp/tmp.XUuieFyC0l ++ return 0 ++ return + replica_primary_endpoint=10.215.21.117 + run_mysql 'DELETE FROM myApp.myApp WHERE id=100500' '-h 10.215.21.117 -uroot -proot_password' + local 'command=DELETE FROM myApp.myApp WHERE id=100500' + local 'uri=-h 10.215.21.117 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I5PoAV1OAH +++ mktemp ++ local LAST_ERR=/tmp/tmp.kTnkrX9JPT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I5PoAV1OAH ++ cat /tmp/tmp.kTnkrX9JPT ++ rm /tmp/tmp.I5PoAV1OAH 
/tmp/tmp.kTnkrX9JPT ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + desc 'patch replica cluster with replicationChannels settings' + set +o xtrace ----------------------------------------------------------------------------------- patch replica cluster with replicationChannels settings ----------------------------------------------------------------------------------- + kubectl_bin patch pxc cross-site-replica --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "source_to_replica", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "10.215.26.231", "port": 3306, "weight": 100},{"host": "10.215.23.90", "port": 3306, "weight": 100},{"host": "10.215.30.96", "port": 3306, "weight": 100}]}]}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.GghCwPOVT4 ++ mktemp + local LAST_ERR=/tmp/tmp.EGHPCko6qx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc cross-site-replica --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "source_to_replica", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "10.215.26.231", "port": 3306, "weight": 100},{"host": "10.215.23.90", "port": 3306, "weight": 100},{"host": "10.215.30.96", "port": 3306, "weight": 100}]}]}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GghCwPOVT4 perconaxtradbcluster.pxc.percona.com/cross-site-replica patched + cat /tmp/tmp.EGHPCko6qx + rm /tmp/tmp.GghCwPOVT4 /tmp/tmp.EGHPCko6qx + return 0 + sleep 40 + desc 'patch replica cluster secrets with replication user' + set +o xtrace ----------------------------------------------------------------------------------- patch replica cluster secrets with replication user ----------------------------------------------------------------------------------- ++ echo -n new_password ++ base64 + kubectl_bin patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.o5daSTE74v ++ mktemp + local LAST_ERR=/tmp/tmp.t0jbH2ICgy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets -p '{"data":{"replication": "bmV3X3Bhc3N3b3Jk"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.o5daSTE74v secret/my-cluster-secrets patched + cat /tmp/tmp.t0jbH2ICgy + rm /tmp/tmp.o5daSTE74v /tmp/tmp.t0jbH2ICgy + return 0 + sleep 15 + wait_cluster_consistency cross-site-replica 3 2 + local cluster_name=cross-site-replica + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NTMgMUDF92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SeLIN3Pas0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ 
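# The replica-side patch above mirrors the source: the same channel name with
# isSource=false, TLS pinned to the shared internal CA, and a weighted
# sourcesList of the three source-pod ClusterIPs. On the MySQL side this
# materializes as an 8.0 asynchronous replication channel, conceptually like
# the following (illustrative SQL only, not necessarily the operator's
# literal statements):
run_mysql "CHANGE REPLICATION SOURCE TO SOURCE_HOST='10.215.26.231', \
    SOURCE_PORT=3306, SOURCE_USER='replication', SOURCE_PASSWORD='new_password', \
    SOURCE_SSL=1 FOR CHANNEL 'source_to_replica'; \
  START REPLICA FOR CHANNEL 'source_to_replica';" \
  '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password'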
kubectl get pxc cross-site-replica -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NTMgMUDF92 ++ cat /tmp/tmp.SeLIN3Pas0 ++ rm /tmp/tmp.NTMgMUDF92 /tmp/tmp.SeLIN3Pas0 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5m3FKF3umk +++ mktemp ++ local LAST_ERR=/tmp/tmp.v9WaZ7hulz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5m3FKF3umk ++ cat /tmp/tmp.v9WaZ7hulz ++ rm /tmp/tmp.5m3FKF3umk /tmp/tmp.v9WaZ7hulz ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-replica +++ local cluster_name=cross-site-replica ++++ get_proxy cross-site-replica ++++ local target_cluster=cross-site-replica +++++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.eVVValN0sa ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Gh9bIpcleR +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-replica -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.eVVValN0sa +++++ cat /tmp/tmp.Gh9bIpcleR +++++ rm /tmp/tmp.eVVValN0sa /tmp/tmp.Gh9bIpcleR +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-replica-haproxy ++++ return +++ local cluster_proxy=cross-site-replica-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LYfQvUrCHq +++ mktemp ++ local LAST_ERR=/tmp/tmp.fwdQcByt0I ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-replica -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LYfQvUrCHq ++ cat /tmp/tmp.fwdQcByt0I ++ rm /tmp/tmp.LYfQvUrCHq /tmp/tmp.fwdQcByt0I ++ return 0 + [[ 2 == \2 ]] + desc 'Check replication works between source -> replica' + set +o xtrace ----------------------------------------------------------------------------------- Check replication works between source -> replica ----------------------------------------------------------------------------------- + compare_mysql_cmd select-2 'SELECT * from myApp.testSourceReplica;' '-h 10.215.21.117 -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.21.117 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2-80.sql ']' + run_mysql 'SELECT * from myApp.testSourceReplica;' '-h 10.215.21.117 -uroot -proot_password' + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.21.117 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qxL4IJLUCI +++ mktemp ++ local LAST_ERR=/tmp/tmp.PpVaizLwNM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qxL4IJLUCI ++ cat /tmp/tmp.PpVaizLwNM ++ rm /tmp/tmp.qxL4IJLUCI /tmp/tmp.PpVaizLwNM ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.7Me1pw3ROq/select-2.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql /tmp/tmp.7Me1pw3ROq/select-2.sql + compare_mysql_cmd select-2 'SELECT * from myApp.testSourceReplica;' '-h 10.215.28.214 -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.28.214 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2-80.sql ']' + run_mysql 'SELECT * from myApp.testSourceReplica;' '-h 10.215.28.214 -uroot -proot_password' + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.28.214 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wwP5vaXmSo +++ mktemp ++ local LAST_ERR=/tmp/tmp.ldEdEB8T6t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wwP5vaXmSo ++ cat /tmp/tmp.ldEdEB8T6t ++ rm /tmp/tmp.wwP5vaXmSo /tmp/tmp.ldEdEB8T6t ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-2.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql /tmp/tmp.7Me1pw3ROq/select-2.sql + compare_mysql_cmd select-2 'SELECT * from myApp.testSourceReplica;' '-h 10.215.21.26 -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.21.26 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2-80.sql ']' + run_mysql 'SELECT * from myApp.testSourceReplica;' '-h 10.215.21.26 -uroot -proot_password' + local 'command=SELECT * from myApp.testSourceReplica;' + local 'uri=-h 10.215.21.26 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5NGn0Vujza +++ mktemp ++ local LAST_ERR=/tmp/tmp.mDnyx56HdQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5NGn0Vujza ++ cat /tmp/tmp.mDnyx56HdQ ++ rm /tmp/tmp.5NGn0Vujza /tmp/tmp.mDnyx56HdQ ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-2.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-2.sql /tmp/tmp.7Me1pw3ROq/select-2.sql + run_backup cross-site-replica backup-minio-replica + local cluster=cross-site-replica + local backup1=backup-minio-replica + desc 'make backup backup-minio-replica' + set +o xtrace ----------------------------------------------------------------------------------- make backup backup-minio-replica ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/backup-minio-replica.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7nv6loCrVJ ++ mktemp + local LAST_ERR=/tmp/tmp.2g0q6PAkSI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/backup-minio-replica.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7nv6loCrVJ perconaxtradbclusterbackup.pxc.percona.com/backup-minio-replica created + cat /tmp/tmp.2g0q6PAkSI + rm /tmp/tmp.7nv6loCrVJ /tmp/tmp.2g0q6PAkSI + return 0 + wait_backup backup-minio-replica + local backup=backup-minio-replica + local status=Succeeded + set +o xtrace backup-minio-replica...............Succeeded + desc 'Switch clusters over' + set +o xtrace ----------------------------------------------------------------------------------- Switch clusters over ----------------------------------------------------------------------------------- ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.VBcjntCrpZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.RLpcFEQFc8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VBcjntCrpZ ++ cat /tmp/tmp.RLpcFEQFc8 ++ rm /tmp/tmp.VBcjntCrpZ /tmp/tmp.RLpcFEQFc8 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974 ++ mktemp + local LAST_OUT=/tmp/tmp.a3Vu2MJetm ++ mktemp + local LAST_ERR=/tmp/tmp.WXdF49qkfK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.a3Vu2MJetm Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified. 
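The switchover that follows rebuilds the old source cluster from the backup just taken on the replica. As the trace below shows, the restore manifest is produced by substituting placeholders in a template with sed and piping the result straight to kubectl apply. A minimal sketch of that templating step (placeholder names and values are taken from this run; the template path is shortened for readability):

    destination='s3://operator-testing/cross-site-replica-2024-07-04-15:10:53-full'
    cat e2e-tests/cross-site/conf/restore-backup-minio.yml \
        | sed -e "s~#cluster~cross-site-source~" \
        | sed -e "s~#destination~${destination}~" \
        | sed -e "s~minio-service.#namespace~minio-service.cross-site-1974~" \
        | kubectl apply -f -    # creates perconaxtradbclusterrestore/backup-minio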
+ cat /tmp/tmp.WXdF49qkfK + rm /tmp/tmp.a3Vu2MJetm /tmp/tmp.WXdF49qkfK + return 0 + desc 'rebuild source cluster' + set +o xtrace ----------------------------------------------------------------------------------- rebuild source cluster ----------------------------------------------------------------------------------- ++ kubectl_bin get -n cross-site-replica-28611 pxc-backup backup-minio-replica -o 'jsonpath={.status.destination}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZVsTkzHS7n +++ mktemp ++ local LAST_ERR=/tmp/tmp.hn0Q0ZI10D ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get -n cross-site-replica-28611 pxc-backup backup-minio-replica -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZVsTkzHS7n ++ cat /tmp/tmp.hn0Q0ZI10D ++ rm /tmp/tmp.ZVsTkzHS7n /tmp/tmp.hn0Q0ZI10D ++ return 0 + destination=s3://operator-testing/cross-site-replica-2024-07-04-15:10:53-full + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/conf/restore-backup-minio.yml + /usr/bin/sed -e s~#cluster~cross-site-source~ + /usr/bin/sed -e s~#destination~s3://operator-testing/cross-site-replica-2024-07-04-15:10:53-full~ + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.cross-site-1974~ + local LAST_OUT=/tmp/tmp.ZaKsQnFWD0 ++ mktemp + local LAST_ERR=/tmp/tmp.Dh5zCrqNMC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZaKsQnFWD0 perconaxtradbclusterrestore.pxc.percona.com/backup-minio created + cat /tmp/tmp.Dh5zCrqNMC + rm /tmp/tmp.ZaKsQnFWD0 /tmp/tmp.Dh5zCrqNMC + return 0 + wait_cluster_consistency cross-site-source 3 2 + local cluster_name=cross-site-source + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JDw4echPEu +++ mktemp ++ local LAST_ERR=/tmp/tmp.ms5Ck44h2g ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JDw4echPEu ++ cat /tmp/tmp.ms5Ck44h2g ++ rm /tmp/tmp.JDw4echPEu /tmp/tmp.ms5Ck44h2g ++ return 0 + [[ stopping == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jYUlVVlSOe +++ mktemp ++ local LAST_ERR=/tmp/tmp.PKdxUymNlY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jYUlVVlSOe ++ cat /tmp/tmp.PKdxUymNlY ++ rm /tmp/tmp.jYUlVVlSOe /tmp/tmp.PKdxUymNlY ++ return 0 + [[ stopping == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dY9vCngZ7f +++ mktemp ++ local LAST_ERR=/tmp/tmp.r4WR5tQJ8Z ++ 
local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dY9vCngZ7f ++ cat /tmp/tmp.r4WR5tQJ8Z ++ rm /tmp/tmp.dY9vCngZ7f /tmp/tmp.r4WR5tQJ8Z ++ return 0 + [[ stopping == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IlMZ6ckX5x +++ mktemp ++ local LAST_ERR=/tmp/tmp.EAWRrGS9nb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IlMZ6ckX5x ++ cat /tmp/tmp.EAWRrGS9nb ++ rm /tmp/tmp.IlMZ6ckX5x /tmp/tmp.EAWRrGS9nb ++ return 0 + [[ paused == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3aselzMnZx +++ mktemp ++ local LAST_ERR=/tmp/tmp.2YVKE2U2al ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3aselzMnZx ++ cat /tmp/tmp.2YVKE2U2al ++ rm /tmp/tmp.3aselzMnZx /tmp/tmp.2YVKE2U2al ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vj0UjFHMcr +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ttuwRCKTi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Vj0UjFHMcr ++ cat /tmp/tmp.0ttuwRCKTi ++ rm /tmp/tmp.Vj0UjFHMcr /tmp/tmp.0ttuwRCKTi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SXVz6P1FQK +++ mktemp ++ local LAST_ERR=/tmp/tmp.2iAOdvwwgd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SXVz6P1FQK ++ cat /tmp/tmp.2iAOdvwwgd ++ rm /tmp/tmp.SXVz6P1FQK /tmp/tmp.2iAOdvwwgd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s72xnWHmCW +++ mktemp ++ local LAST_ERR=/tmp/tmp.xVxb5D5uHQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.s72xnWHmCW ++ cat /tmp/tmp.xVxb5D5uHQ ++ rm /tmp/tmp.s72xnWHmCW /tmp/tmp.xVxb5D5uHQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc 
cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RSuLMiBYIK +++ mktemp ++ local LAST_ERR=/tmp/tmp.XKzctvjbq2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RSuLMiBYIK ++ cat /tmp/tmp.XKzctvjbq2 ++ rm /tmp/tmp.RSuLMiBYIK /tmp/tmp.XKzctvjbq2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 8 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DtQ5mH88o1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EUObjRHP7J ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DtQ5mH88o1 ++ cat /tmp/tmp.EUObjRHP7J ++ rm /tmp/tmp.DtQ5mH88o1 /tmp/tmp.EUObjRHP7J ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 9 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.igqTKGHBjP +++ mktemp ++ local LAST_ERR=/tmp/tmp.VsRIeqNKmt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.igqTKGHBjP ++ cat /tmp/tmp.VsRIeqNKmt ++ rm /tmp/tmp.igqTKGHBjP /tmp/tmp.VsRIeqNKmt ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 10 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DZwxEK9641 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xnW4aPoX3E ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DZwxEK9641 ++ cat /tmp/tmp.xnW4aPoX3E ++ rm /tmp/tmp.DZwxEK9641 /tmp/tmp.xnW4aPoX3E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 11 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zUV2ZHzdGa +++ mktemp ++ local LAST_ERR=/tmp/tmp.vs1NT0WM3x ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zUV2ZHzdGa ++ cat /tmp/tmp.vs1NT0WM3x ++ rm /tmp/tmp.zUV2ZHzdGa /tmp/tmp.vs1NT0WM3x ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 12 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WFcqj4vmXq +++ mktemp ++ local LAST_ERR=/tmp/tmp.tQe3cKvdJe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WFcqj4vmXq ++ cat /tmp/tmp.tQe3cKvdJe ++ rm /tmp/tmp.WFcqj4vmXq /tmp/tmp.tQe3cKvdJe ++ return 0 + [[ ready == 
\r\e\a\d\y ]] ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yvJ0jcJzBv +++ mktemp ++ local LAST_ERR=/tmp/tmp.KePzjLXG0P ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.yvJ0jcJzBv ++ cat /tmp/tmp.KePzjLXG0P ++ rm /tmp/tmp.yvJ0jcJzBv /tmp/tmp.KePzjLXG0P ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine cross-site-source +++ local cluster_name=cross-site-source ++++ get_proxy cross-site-source ++++ local target_cluster=cross-site-source +++++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Xff8eFjNKp ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.E9jjgb4dUi +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc cross-site-source -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.Xff8eFjNKp +++++ cat /tmp/tmp.E9jjgb4dUi +++++ rm /tmp/tmp.Xff8eFjNKp /tmp/tmp.E9jjgb4dUi +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo cross-site-source-haproxy ++++ return +++ local cluster_proxy=cross-site-source-haproxy +++ echo haproxy ++ kubectl_bin get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ksXOaLwV0r +++ mktemp ++ local LAST_ERR=/tmp/tmp.7fAhrIFblh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc cross-site-source -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ksXOaLwV0r ++ cat /tmp/tmp.7fAhrIFblh ++ rm /tmp/tmp.ksXOaLwV0r /tmp/tmp.7fAhrIFblh ++ return 0 + [[ 2 == \2 ]] + run_mysql 'DELETE FROM myApp.myApp WHERE id=100500' '-h 10.215.26.231 -uroot -proot_password' + local 'command=DELETE FROM myApp.myApp WHERE id=100500' + local 'uri=-h 10.215.26.231 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cw0vwikgbK +++ mktemp ++ local LAST_ERR=/tmp/tmp.JLtSFhVwwt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Cw0vwikgbK ++ cat /tmp/tmp.JLtSFhVwwt ++ rm /tmp/tmp.Cw0vwikgbK /tmp/tmp.JLtSFhVwwt ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + run_mysql 'DELETE FROM myApp.testSourceReplica WHERE id=100700' '-h 10.215.26.231 -uroot -proot_password' + local 'command=DELETE FROM myApp.testSourceReplica WHERE id=100700' + local 'uri=-h 10.215.26.231 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.79ZdwonHcX +++ mktemp ++ local LAST_ERR=/tmp/tmp.nk1sMgrYDe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.79ZdwonHcX ++ cat /tmp/tmp.nk1sMgrYDe ++ rm /tmp/tmp.79ZdwonHcX /tmp/tmp.nk1sMgrYDe ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + desc 'configure old replica as source' + set +o xtrace ----------------------------------------------------------------------------------- configure old replica as source ----------------------------------------------------------------------------------- + kubectl patch pxc cross-site-replica -n cross-site-replica-28611 --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/pxc/replicationChannels/0'\''}]' perconaxtradbcluster.pxc.percona.com/cross-site-replica patched + kubectl_bin patch pxc cross-site-replica -n cross-site-replica-28611 --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "replica_to_source", "isSource": true}]}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.mSpIGI9xrV ++ mktemp + local LAST_ERR=/tmp/tmp.G42Bnm6dhJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc cross-site-replica -n cross-site-replica-28611 --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "replica_to_source", "isSource": true}]}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mSpIGI9xrV perconaxtradbcluster.pxc.percona.com/cross-site-replica patched + cat /tmp/tmp.G42Bnm6dhJ + rm /tmp/tmp.mSpIGI9xrV /tmp/tmp.G42Bnm6dhJ + return 0 + desc 'configure old source as replica' + set +o xtrace ----------------------------------------------------------------------------------- configure old source as replica ----------------------------------------------------------------------------------- + kubectl patch pxc cross-site-source -n cross-site-1974 --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/pxc/replicationChannels/0'\''}]' perconaxtradbcluster.pxc.percona.com/cross-site-source patched + kubectl_bin patch pxc cross-site-source -n cross-site-1974 --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "replica_to_source", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "10.215.21.117", "port": 3306, "weight": 100},{"host": "10.215.28.214", "port": 3306, "weight": 100},{"host": "10.215.21.26", "port": 3306, "weight": 100}]}]}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.4gB0EVXKz2 ++ mktemp + local LAST_ERR=/tmp/tmp.oAc7kD0R12 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc cross-site-source -n cross-site-1974 --type=merge --patch '{"spec": {"pxc": {"replicationChannels":[{"name": "replica_to_source", "isSource": false, "configuration": {"ssl": true, "sslSkipVerify": true, "ca": "/etc/mysql/ssl-internal/ca.crt"}, "sourcesList": [{"host": "10.215.21.117", "port": 3306, "weight": 100},{"host": "10.215.28.214", "port": 3306, "weight": 100},{"host": "10.215.21.26", "port": 3306, "weight": 100}]}]}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4gB0EVXKz2 perconaxtradbcluster.pxc.percona.com/cross-site-source patched + cat 
/tmp/tmp.oAc7kD0R12 + rm /tmp/tmp.4gB0EVXKz2 /tmp/tmp.oAc7kD0R12 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.tWRXbraxoS +++ mktemp ++ local LAST_ERR=/tmp/tmp.MQix86rLLO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tWRXbraxoS ++ cat /tmp/tmp.MQix86rLLO ++ rm /tmp/tmp.tWRXbraxoS /tmp/tmp.MQix86rLLO ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-replica-28611 ++ mktemp + local LAST_OUT=/tmp/tmp.gcXIkgL61S ++ mktemp + local LAST_ERR=/tmp/tmp.u3IcNeDrK2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-replica-28611 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gcXIkgL61S Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified. + cat /tmp/tmp.u3IcNeDrK2 + rm /tmp/tmp.gcXIkgL61S /tmp/tmp.u3IcNeDrK2 + return 0 + desc 'Write data to replica cluster' + set +o xtrace ----------------------------------------------------------------------------------- Write data to replica cluster ----------------------------------------------------------------------------------- + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testReplicaSource (id int PRIMARY KEY);' '-h 10.215.21.117 -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS testReplicaSource (id int PRIMARY KEY);' + local 'uri=-h 10.215.21.117 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.erFoKzGX4n +++ mktemp ++ local LAST_ERR=/tmp/tmp.HWOZ3VxhUr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.erFoKzGX4n ++ cat /tmp/tmp.HWOZ3VxhUr ++ rm /tmp/tmp.erFoKzGX4n /tmp/tmp.HWOZ3VxhUr ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + run_mysql 'INSERT myApp.testReplicaSource (id) VALUES (100800)' '-h 10.215.21.117 -uroot -proot_password' + local 'command=INSERT myApp.testReplicaSource (id) VALUES (100800)' + local 'uri=-h 10.215.21.117 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pJPPSiYfG7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xMK4x6O66f ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pJPPSiYfG7 ++ cat /tmp/tmp.xMK4x6O66f ++ rm /tmp/tmp.pJPPSiYfG7 /tmp/tmp.xMK4x6O66f ++ return 0 + client_pod=pxc-client-6644d8898f-z42ds + wait_pod pxc-client-6644d8898f-z42ds + local 
pod=pxc-client-6644d8898f-z42ds + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-z42ds ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-z42ds condition met pxc-client-6644d8898f-z42ds.Ok + set +o xtrace + sleep 15 + desc 'Check replication works between replica -> source' + set +o xtrace ----------------------------------------------------------------------------------- Check replication works between replica -> source ----------------------------------------------------------------------------------- ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.CDz7GnBmLQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.w6HUeA9Mdy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CDz7GnBmLQ ++ cat /tmp/tmp.w6HUeA9Mdy ++ rm /tmp/tmp.CDz7GnBmLQ /tmp/tmp.w6HUeA9Mdy ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974 ++ mktemp + local LAST_OUT=/tmp/tmp.7UcEuUEqZf ++ mktemp + local LAST_ERR=/tmp/tmp.K1DooMVwYe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3 --namespace=cross-site-1974 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7UcEuUEqZf Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1745-2e9e79b9-4-cluster3" modified. + cat /tmp/tmp.K1DooMVwYe + rm /tmp/tmp.7UcEuUEqZf /tmp/tmp.K1DooMVwYe + return 0 + compare_mysql_cmd select-3 'SELECT * from myApp.testReplicaSource;' '-h 10.215.26.231 -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.26.231 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3-80.sql ']' + run_mysql 'SELECT * from myApp.testReplicaSource;' '-h 10.215.26.231 -uroot -proot_password' + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.26.231 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.az3b5HIznr +++ mktemp ++ local LAST_ERR=/tmp/tmp.HoKyRiTPnn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.az3b5HIznr ++ cat /tmp/tmp.HoKyRiTPnn ++ rm /tmp/tmp.az3b5HIznr /tmp/tmp.HoKyRiTPnn ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-3.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql /tmp/tmp.7Me1pw3ROq/select-3.sql + compare_mysql_cmd select-3 'SELECT * from myApp.testReplicaSource;' '-h 10.215.23.90 -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.23.90 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3-80.sql ']' + run_mysql 'SELECT * from myApp.testReplicaSource;' '-h 10.215.23.90 -uroot -proot_password' + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.23.90 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rxaOIhduBw +++ mktemp ++ local LAST_ERR=/tmp/tmp.kbA3k0hsVR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rxaOIhduBw ++ cat /tmp/tmp.kbA3k0hsVR ++ rm /tmp/tmp.rxaOIhduBw /tmp/tmp.kbA3k0hsVR ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-3.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql /tmp/tmp.7Me1pw3ROq/select-3.sql + compare_mysql_cmd select-3 'SELECT * from myApp.testReplicaSource;' '-h 10.215.30.96 -uroot -proot_password' + local command_id=select-3 + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.30.96 -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3-80.sql ']' + run_mysql 'SELECT * from myApp.testReplicaSource;' '-h 10.215.30.96 -uroot -proot_password' + local 'command=SELECT * from myApp.testReplicaSource;' + local 'uri=-h 10.215.30.96 -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1hEr15Qzn4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Svt0wTRN7c ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1hEr15Qzn4 ++ cat /tmp/tmp.Svt0wTRN7c ++ rm /tmp/tmp.1hEr15Qzn4 /tmp/tmp.Svt0wTRN7c ++ return 0 + client_pod=pxc-client-6644d8898f-lkk47 + wait_pod pxc-client-6644d8898f-lkk47 + local pod=pxc-client-6644d8898f-lkk47 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-lkk47 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-lkk47 condition met pxc-client-6644d8898f-lkk47.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.7Me1pw3ROq/select-3.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1745/e2e-tests/cross-site/compare/select-3.sql /tmp/tmp.7Me1pw3ROq/select-3.sql + destroy cross-site-1974 true + local namespace=cross-site-1974 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + grep -v level=info ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.7Me1pw3ROq/operator.log ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.RTmPsY5kP3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2pOlwKAfjO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RTmPsY5kP3 ++ cat /tmp/tmp.2pOlwKAfjO ++ rm /tmp/tmp.RTmPsY5kP3 /tmp/tmp.2pOlwKAfjO ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-7577b89745-bq2qg ++ mktemp + local LAST_OUT=/tmp/tmp.4qtHbFph1X ++ mktemp + local LAST_ERR=/tmp/tmp.RRvTvlfcaO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-7577b89745-bq2qg + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4qtHbFph1X + cat /tmp/tmp.RRvTvlfcaO + rm /tmp/tmp.4qtHbFph1X /tmp/tmp.RRvTvlfcaO + return 0 2024-07-04T14:46:06.420Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.26.15-gke.1469001"} 2024-07-04T14:46:06.421Z INFO setup Manager starting up {"gitCommit": "2e9e79b989086189ac2f8051c20f0f1399b0f743", "gitBranch": "PR-1745-2e9e79b9", "buildTime": "2024-07-04T13:40:32Z", "goVersion": "go1.22.5", "os": "linux", "arch": "amd64"} 2024-07-04T14:46:06.421Z INFO setup Registering Components. 2024-07-04T14:46:09.174Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2024-07-04T14:46:09.177Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2024-07-04T14:46:09.177Z INFO controller-runtime.metrics Starting metrics server 2024-07-04T14:46:09.177Z INFO controller-runtime.webhook Starting webhook server 2024-07-04T14:46:09.177Z INFO setup Starting the Cmd. 2024-07-04T14:46:09.177Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2024-07-04T14:46:09.178Z INFO controller-runtime.certwatcher Starting certificate watcher 2024-07-04T14:46:09.178Z INFO controller-runtime.certwatcher Updated current TLS certificate 2024-07-04T14:46:09.178Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2024-07-04T14:46:09.379Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 
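The operator log reproduced below was collected by the destroy step above: it dumps the operator pod's logs, drops benign reconcile noise with grep -v, strips the numeric "ts" field with sed so that otherwise identical records compare equal, and deduplicates with sort -u, which also leaves the surviving entries ordered by their leading timestamp. Condensed, the pipeline from the trace looks like this (pod name as resolved by get_operator_pod in this run):

    kubectl logs -n pxc-operator percona-xtradb-cluster-operator-7577b89745-bq2qg \
        | grep -v 'the object has been modified' \
        | grep -v 'get backup status: Job.batch' \
        | grep -v level=info \
        | /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
        | sort -u \
        | tee /tmp/tmp.7Me1pw3ROq/operator.log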
2024-07-04T14:46:09.403Z DEBUG events percona-xtradb-cluster-operator-7577b89745-bq2qg_cd70d54f-9d59-4d73-96d3-b226d1a0cf44 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"8a920a48-5bf1-4684-8d0d-30fd616d79c7","apiVersion":"coordination.k8s.io/v1","resourceVersion":"33747"}, "reason": "LeaderElection"} 2024-07-04T14:46:09.403Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxc-controller"} 2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2024-07-04T14:46:09.509Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2024-07-04T14:46:09.510Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2024-07-04T14:46:09.515Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2024-07-04T14:48:06.918Z INFO Set CR version {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "6a480c61-e8fa-4619-8197-80a53a274133", "version": "1.15.0"} 2024-07-04T14:48:15.811Z INFO KubeAPIWarningLogger .metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: 8037a8e9-67aa-4295-9aab-a1dc315ac8d7 2024-07-04T14:49:34.604Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "operator"} 2024-07-04T14:49:34.646Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "monitor"} 2024-07-04T14:49:34.962Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"} 2024-07-04T14:49:34.998Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"} 2024-07-04T14:49:35.046Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "xtrabackup"} 2024-07-04T14:49:35.112Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"} 2024-07-04T14:49:35.155Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "replication"} 
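Further down, the repeated "Replication for channel is not running" records show the source_to_replica channel failing with "Access denied for user 'replication'" until 15:10:11, when the operator finishes rotating that user's password (Password updated, then Old password discarded) and the channel's Last_IO_Error clears. To watch the channel recover directly, one could query its status through the test's run_mysql helper; a hypothetical diagnostic, not part of this run:

    # SHOW REPLICA STATUS FOR CHANNEL is available from MySQL 8.0.22 on;
    # host and credentials below are illustrative, matching this test's naming
    run_mysql "SHOW REPLICA STATUS FOR CHANNEL 'source_to_replica'\G" \
        '-h cross-site-replica-pxc-0.cross-site-replica-pxc -uroot -proot_password'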
2024-07-04T14:49:37.252Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.19.140:3306: connect: connection refused"} 2024-07-04T14:51:53.036Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "aa61417d-f0c4-4dd2-8875-253d2d015d8c", "user": "root"} 2024-07-04T14:51:53.278Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "aa61417d-f0c4-4dd2-8875-253d2d015d8c", "new version": "8.0.36-28.1"} 2024-07-04T14:54:24.571Z INFO Password changed, updating user {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"} 2024-07-04T14:54:24.602Z INFO Password updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"} 2024-07-04T14:54:24.612Z INFO MySQL init secret created {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "secret": "cross-site-source-mysql-init", "user": "replication"} 2024-07-04T14:54:24.623Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"} 2024-07-04T14:54:24.652Z INFO Old password discarded {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"} 2024-07-04T14:55:01.894Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "cross-site-1974", "name": "backup-minio-source", "reconcileID": "e434b7be-5264-4933-9a88-02967576e855", "Namespace": "cross-site-1974", "Name": "xb-backup-minio-source"} 2024-07-04T14:57:46.555Z INFO Set CR version {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "2131da7b-2f63-4b7f-b0f5-39e52635d4d3", "version": "1.15.0"} 2024-07-04T14:57:52.827Z INFO KubeAPIWarningLogger .metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: cb21d6ea-de72-4e44-ae0a-9ebe795cadaf 2024-07-04T14:59:06.246Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "operator"} 2024-07-04T14:59:06.283Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "monitor"} 2024-07-04T14:59:06.373Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"} 2024-07-04T14:59:06.409Z INFO monitor user privileges granted {"controller": 
"pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"} 2024-07-04T14:59:06.447Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "xtrabackup"} 2024-07-04T14:59:06.499Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"} 2024-07-04T14:59:06.532Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "replication"} 2024-07-04T15:01:36.524Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "47c3f584-6025-45bf-bd86-5c1e9bc5153b", "user": "root"} 2024-07-04T15:01:36.729Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "47c3f584-6025-45bf-bd86-5c1e9bc5153b", "new version": "8.0.36-28.1"} 2024-07-04T15:03:52.455Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab"} 2024-07-04T15:03:52.500Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica"} 2024-07-04T15:04:38.623Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica", "backup": ""} 2024-07-04T15:05:13.699Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica"} 2024-07-04T15:06:08.832Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "38c91e79-2d44-464b-b676-e2144d2f0b42", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.31.143:3306: connect: connection refused"} 2024-07-04T15:08:43.953Z INFO You can view xtrabackup log: 2024-07-04T15:09:27.078Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-2"} 2024-07-04T15:09:27.097Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-0"} 2024-07-04T15:09:27.114Z INFO Replica is not readonly. 
Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-1"} 2024-07-04T15:09:27.182Z INFO Replication pod has changed {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "new replication pod": "cross-site-replica-pxc-0"} 2024-07-04T15:09:28.390Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "a207997c-3c11-45bd-88f7-edf703812251", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:32.719Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "fa585b58-4f05-49ed-ab82-5650b23a154c", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:38.079Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "03cf4612-96ca-4b9d-a411-6243df93b2b1", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:43.452Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4aac024d-e688-46ca-ba1f-021349edfe30", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:49.793Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "774197d2-982e-4b45-85b3-77f50bb87570", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:55.372Z INFO Replication for channel is not running. 
Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "39cac6d0-297a-498e-b0a5-491f37889fac", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:00.808Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "af98d490-af2f-4002-8a13-29255b85145e", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:06.264Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "73d220a7-d8c5-4391-9b1c-f48a9ab82777", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:11.573Z INFO Password changed, updating user {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.605Z INFO Password updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.616Z INFO MySQL init secret created {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "secret": "cross-site-replica-mysql-init", "user": "replication"} 2024-07-04T15:10:11.626Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.652Z INFO Old password discarded {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.786Z INFO Replication for channel is not running. 
2024-07-04T15:10:53.205Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio-replica", "reconcileID": "3e956558-8f49-410d-a3d8-9803866a8cde", "Namespace": "cross-site-replica-28611", "Name": "xb-backup-minio-replica"}
2024-07-04T15:11:26.164Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5"}
2024-07-04T15:11:26.272Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source"}
2024-07-04T15:11:50.978Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "71b5c358-3d5a-4857-9b1b-653812b7ee38", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:11:56.371Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "11d46050-fc06-4f4d-9a22-900aa7fc7762", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:01.753Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "037a34ef-1691-44b3-8e28-f3c87100b97f", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:07.196Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "7248bb22-a4a1-46b0-8fc7-dbec4d4f960a", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:12.653Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "9ab0bbb7-e9a3-4aa3-9fed-e474f1b6d6aa", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
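The status these entries keep pointing at can be read straight off the replica. A minimal sketch only: the channel name and pod come from the log, while the ROOT_PASSWORD variable and the "pxc" container name are assumptions:
$ kubectl -n cross-site-replica-28611 exec cross-site-replica-pxc-0 -c pxc -- mysql -uroot -p"$ROOT_PASSWORD" -e "SHOW REPLICA STATUS FOR CHANNEL 'source_to_replica'\G"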
2024-07-04T15:12:18.246Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4854ee42-1cfb-4542-9447-c3c9960bd240", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:23.420Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source", "backup": ""}
2024-07-04T15:12:24.784Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4216d9ef-10cd-4ee2-9df1-8e509d853b9d", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:30.190Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e9857a81-a712-4511-82e6-7699ce1c0c66", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:35.648Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "3d8b004f-a0a0-4285-8677-11a9cf3dad28", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:40.480Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source"}
2024-07-04T15:12:41.191Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "6f09a370-dd7e-49d2-a038-ce102859da6e", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:46.544Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "a9150814-a3f5-45c8-820b-7b2c3b9b6251", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
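The "stopping cluster" / "starting restore" / "starting cluster" entries above are the pxcrestore-controller working through the backup-minio restore of cross-site-source; while it runs, the restore object can be polled via the pxc-restore shortname used elsewhere in this log (illustrative):
$ kubectl -n cross-site-1974 get pxc-restore backup-minio -o yaml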
2024-07-04T15:12:51.980Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "8a6180b8-b133-4e86-bdce-8068f5734321", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:12:57.433Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "9e38bd11-e83e-41d6-a1a1-0cca90feb170", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:13:02.891Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "9d11da87-d168-4a7e-bb3c-29911c50aef0", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:13:08.339Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "b65f0c2a-ae1b-49cf-aa7f-c8f2b10a4980", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:13:13.744Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "7a78e391-7f15-4b47-a80b-b3917508c0aa", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:13:19.480Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "86f99d5e-dae5-4b2d-a0de-4dbd2e5974e5", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts.
Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:21.997Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "4e88a23d-2757-489e-b523-7f938c2f48e5", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.19.140:3306: connect: connection refused"} 2024-07-04T15:13:25.057Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "edbd10a9-6bf0-486e-b42e-cf1b3910d973", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:30.753Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "16edc42a-f1c4-4c62-a6df-e6888fd793dc", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:36.190Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "11b3e643-5d77-43fa-8363-14b90b7a624a", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:41.643Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "b90e4935-fdb3-45c6-b5a6-8373191d64f8", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 2/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:47.036Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "67b2a9cf-aedd-4fb9-93f7-72fcaae403ec", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:52.404Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "3706ba29-bf4e-49d7-aba5-298082db6821", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. 
Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:13:57.937Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "696d8385-9dcd-456d-a785-cbda4499dcf2", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:03.413Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4fd458f6-62e6-4bee-b20f-cc8000d7a4c0", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:09.066Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "49bd9e2b-89b4-48f8-bc5d-710f0509bed0", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:14.504Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "ef930124-25e7-4a26-be56-cfc1ff6f1701", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:19.955Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "dfe25063-c390-41cf-944f-8fa3e089aebb", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:25.343Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "8320d9f1-5d5e-46fe-a245-56a8a9cdc78e", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:30.791Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "728b6825-ca4c-44d6-b69d-2c1768121c07", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. 
This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:36.461Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "fca4b246-289d-491c-a16d-3a4670fa5b79", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:41.925Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e371e6be-b67f-40d8-8547-3ed501adb541", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 3/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:47.338Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4a899592-a23f-449b-92cc-c3a3c7902c67", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:52.806Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60158c84-13a6-432b-8685-b514e8ce7adc", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:14:58.315Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4c73d013-555d-4842-bfaa-c8f87dc88eb1", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:03.739Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "ea960baa-da38-4ccb-b084-c6892a4d467a", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:09.416Z INFO Replication for channel is not running. 
Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "bdb4d886-6f7a-4249-9653-654a5cb88200", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:14.871Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "1052107d-7268-4df8-bfa1-1cbe67f66d2d", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:20.212Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "2687d16f-ef1c-43f8-bd5a-40ac99fbbd58", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:25.581Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "7cd240d0-8e18-48b8-82b5-41de869fc438", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:31.003Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "471cbeb0-9443-4156-aab0-7c86cac89895", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:36.453Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e0cc0ec0-372a-4912-bd7a-77dbf62f10f9", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:41.837Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "5470b84a-16d2-41bb-8698-5f120908fa1c", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 4/0, with a delay of 60 seconds between attempts. 
Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:47.340Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "34299ef1-cbe2-45d0-90ac-c9c59104cc83", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:49.676Z INFO You can view xtrabackup log: 2024-07-04T15:15:52.788Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "d7d78b59-fda3-4f10-a105-d8bd31edfc90", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:15:58.243Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e8d2120e-a9fb-47e6-bd9f-8499e7f32262", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:16:03.806Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4df76e60-027d-49ac-a589-ac92568cce8d", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:16:09.343Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "d91937fb-c17e-41c5-a4f1-bf08513817ed", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:16:14.785Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "76e25122-460c-4304-8069-5fdf3f3984dd", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:16:20.154Z INFO Replication for channel is not running. 
Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "ce2cdac2-350e-4511-ae2e-13b3d1bff9ea", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 5/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"}
2024-07-04T15:16:24.952Z INFO Remove outdated replication channel {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "channel": "source_to_replica"}
2024-07-04T15:16:24.954Z DEBUG Remove outdated replication source {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "channel": "source_to_replica", "host": "10.215.23.90"}
2024-07-04T15:16:24.955Z DEBUG Remove outdated replication source {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "channel": "source_to_replica", "host": "10.215.26.231"}
2024-07-04T15:16:24.955Z DEBUG Remove outdated replication source {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "channel": "source_to_replica", "host": "10.215.30.96"}
2024-07-04T15:16:24.964Z INFO Primary is readonly. Disabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "pod": "cross-site-replica-pxc-1"}
2024-07-04T15:16:24.974Z INFO Primary is readonly. Disabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "pod": "cross-site-replica-pxc-2"}
2024-07-04T15:16:24.984Z INFO Primary is readonly. Disabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "pod": "cross-site-replica-pxc-0"}
2024-07-04T15:16:30.739Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "22447e05-30c9-4bcf-8373-6c4ab12a2a45", "pod": "cross-site-source-pxc-1"}
2024-07-04T15:16:30.762Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "22447e05-30c9-4bcf-8373-6c4ab12a2a45", "pod": "cross-site-source-pxc-2"}
2024-07-04T15:16:30.779Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "22447e05-30c9-4bcf-8373-6c4ab12a2a45", "pod": "cross-site-source-pxc-0"}
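The readonly flips above are ordinary MySQL globals, so the resulting state can be spot-checked per pod. Illustrative sketch only: pod and namespace names come from the log, while the ROOT_PASSWORD variable and the "pxc" container name are assumptions:
$ kubectl -n cross-site-1974 exec cross-site-source-pxc-0 -c pxc -- mysql -uroot -p"$ROOT_PASSWORD" -e 'SELECT @@read_only, @@super_read_only'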
2024-07-04T15:16:30.819Z INFO Replication pod has changed {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "22447e05-30c9-4bcf-8373-6c4ab12a2a45", "new replication pod": "cross-site-source-pxc-0"}
{"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5"}
{"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab"}
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:222
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:261
/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:324
If everything is fine, you can cleanup the job:
$ kubectl delete pxc-restore/backup-minio
$ kubectl logs job/restore-job-backup-minio-cross-site-replica
$ kubectl logs job/restore-job-backup-minio-cross-site-source
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler
sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2
+ kubectl get pxc --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl patch pxc -n cross-site-1974 cross-site-source --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/cross-site-source patched
+ kubectl patch pxc -n cross-site-replica-28611 cross-site-replica --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/cross-site-replica patched
+ kubectl_bin delete pxc --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.SdkWSte1up
++ mktemp
+ local LAST_ERR=/tmp/tmp.0ewBiFryMb
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.SdkWSte1up
perconaxtradbcluster.pxc.percona.com "cross-site-source" deleted
perconaxtradbcluster.pxc.percona.com "cross-site-replica" deleted
+ cat /tmp/tmp.0ewBiFryMb
+ rm /tmp/tmp.SdkWSte1up /tmp/tmp.0ewBiFryMb
+ return 0
+ kubectl_bin delete pxc-backup --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.DXdDl265PZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.aQMmXsMz6T
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-backup --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.DXdDl265PZ
perconaxtradbclusterbackup.pxc.percona.com "backup-minio-source" deleted
perconaxtradbclusterbackup.pxc.percona.com "backup-minio-replica" deleted
+ cat /tmp/tmp.aQMmXsMz6T
+ rm /tmp/tmp.DXdDl265PZ /tmp/tmp.aQMmXsMz6T
+ return 0
+ kubectl_bin delete pxc-restore --all --all-namespaces
++ mktemp
+ local LAST_OUT=/tmp/tmp.UHSwSQpTwW
++ mktemp
+ local LAST_ERR=/tmp/tmp.KzU0UnJLXh
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pxc-restore --all --all-namespaces
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.UHSwSQpTwW
perconaxtradbclusterrestore.pxc.percona.com "backup-minio" deleted
perconaxtradbclusterrestore.pxc.percona.com "backup-minio" deleted
+ cat /tmp/tmp.KzU0UnJLXh
+ rm /tmp/tmp.UHSwSQpTwW /tmp/tmp.KzU0UnJLXh
+ return 0
+ kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
++ mktemp
+ local LAST_OUT=/tmp/tmp.lvaAJanbN0
++ mktemp
+ local LAST_ERR=/tmp/tmp.yVMgB8CCKz
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.lvaAJanbN0
validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted
+ cat /tmp/tmp.yVMgB8CCKz
+ rm /tmp/tmp.lvaAJanbN0 /tmp/tmp.yVMgB8CCKz
+ return 0
+ kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml
+ :
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace cross-site-1974
+ rm -rf /tmp/tmp.7Me1pw3ROq
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
++ mktemp
+ destroy cross-site-replica-28611 true
+ local namespace=cross-site-replica-28611
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false -o 1 == 1 ']'
+ local LAST_OUT=/tmp/tmp.bQn4DZLrg2
++ mktemp
+ grep -v level=info
++ mktemp
+ grep -v 'get backup status: Job.batch'
++ get_operator_pod
+ sort -u
++ local label_prefix=app.kubernetes.io/
+ tee /tmp/tmp.7Me1pw3ROq/operator.log
+ local LAST_OUT=/tmp/tmp.SzAYPLxOPG
tee: /tmp/tmp.7Me1pw3ROq/operator.log: No such file or directory
++ mktemp
+ local LAST_ERR=/tmp/tmp.rQ00AYWQII
+ local exit_status=0
+++ grep -c percona-xtradb-cluster-operator
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
+ local LAST_ERR=/tmp/tmp.slWV0OSWPu
+ local exit_status=0
++ seq 0 2
+ /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g'
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pxc-operator
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace cross-site-1974
+ grep -v 'the object has been modified'
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.LRWXCu7WCj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HrIVcoLzhL
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.LRWXCu7WCj
++ cat /tmp/tmp.HrIVcoLzhL
++ rm /tmp/tmp.LRWXCu7WCj /tmp/tmp.HrIVcoLzhL
++ return 0
+ kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-7577b89745-bq2qg
++ mktemp
+ local LAST_OUT=/tmp/tmp.9jTKWf6Rej
++ mktemp
+ local LAST_ERR=/tmp/tmp.3FbHfRhOZF
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs -n pxc-operator percona-xtradb-cluster-operator-7577b89745-bq2qg
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.9jTKWf6Rej
+ cat /tmp/tmp.3FbHfRhOZF
+ rm /tmp/tmp.9jTKWf6Rej /tmp/tmp.3FbHfRhOZF
+ return 0
2024-07-04T14:46:06.420Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.26.15-gke.1469001"}
2024-07-04T14:46:06.421Z INFO setup Manager starting up {"gitCommit": "2e9e79b989086189ac2f8051c20f0f1399b0f743", "gitBranch": "PR-1745-2e9e79b9", "buildTime": "2024-07-04T13:40:32Z", "goVersion": "go1.22.5", "os": "linux", "arch": "amd64"}
2024-07-04T14:46:06.421Z INFO setup Registering Components.
2024-07-04T14:46:09.174Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"}
2024-07-04T14:46:09.177Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false}
2024-07-04T14:46:09.177Z INFO controller-runtime.metrics Starting metrics server
2024-07-04T14:46:09.177Z INFO controller-runtime.webhook Starting webhook server
2024-07-04T14:46:09.177Z INFO setup Starting the Cmd.
2024-07-04T14:46:09.177Z INFO starting server {"name": "health probe", "addr": "[::]:8081"}
2024-07-04T14:46:09.178Z INFO controller-runtime.certwatcher Starting certificate watcher
2024-07-04T14:46:09.178Z INFO controller-runtime.certwatcher Updated current TLS certificate
2024-07-04T14:46:09.178Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443}
2024-07-04T14:46:09.379Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com...
2024-07-04T14:46:09.403Z DEBUG events percona-xtradb-cluster-operator-7577b89745-bq2qg_cd70d54f-9d59-4d73-96d3-b226d1a0cf44 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"8a920a48-5bf1-4684-8d0d-30fd616d79c7","apiVersion":"coordination.k8s.io/v1","resourceVersion":"33747"}, "reason": "LeaderElection"}
2024-07-04T14:46:09.403Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com
2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxcbackup-controller"}
2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxc-controller"}
2024-07-04T14:46:09.404Z INFO Starting Controller {"controller": "pxcrestore-controller"}
2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"}
2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"}
2024-07-04T14:46:09.404Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"}
2024-07-04T14:46:09.509Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1}
2024-07-04T14:46:09.510Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1}
2024-07-04T14:46:09.515Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1}
2024-07-04T14:48:06.918Z INFO Set CR version {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "6a480c61-e8fa-4619-8197-80a53a274133", "version": "1.15.0"}
2024-07-04T14:48:15.811Z INFO KubeAPIWarningLogger .metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: 8037a8e9-67aa-4295-9aab-a1dc315ac8d7
2024-07-04T14:49:34.604Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "operator"}
2024-07-04T14:49:34.646Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "monitor"}
2024-07-04T14:49:34.962Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"}
2024-07-04T14:49:34.998Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"}
2024-07-04T14:49:35.046Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "xtrabackup"}
2024-07-04T14:49:35.112Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5"}
2024-07-04T14:49:35.155Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "user": "replication"}
2024-07-04T14:49:37.252Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "0cb2af05-f7ab-4518-a4a7-559520c625c5", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.19.140:3306: connect: connection refused"}
2024-07-04T14:51:53.036Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "aa61417d-f0c4-4dd2-8875-253d2d015d8c", "user": "root"}
2024-07-04T14:51:53.278Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "aa61417d-f0c4-4dd2-8875-253d2d015d8c", "new version": "8.0.36-28.1"}
2024-07-04T14:54:24.571Z INFO Password changed, updating user {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"}
2024-07-04T14:54:24.602Z INFO Password updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"}
2024-07-04T14:54:24.612Z INFO MySQL init secret created {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "secret": "cross-site-source-mysql-init", "user": "replication"}
2024-07-04T14:54:24.623Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"}
2024-07-04T14:54:24.652Z INFO Old password discarded {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "30afec7b-0fe9-4c4d-86ba-0337a69a40b7", "user": "replication"}
2024-07-04T14:55:01.894Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "cross-site-1974", "name": "backup-minio-source", "reconcileID": "e434b7be-5264-4933-9a88-02967576e855", "Namespace": "cross-site-1974", "Name": "xb-backup-minio-source"}
2024-07-04T14:57:46.555Z INFO Set CR version {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "2131da7b-2f63-4b7f-b0f5-39e52635d4d3", "version": "1.15.0"}
2024-07-04T14:57:52.827Z INFO KubeAPIWarningLogger .metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: cb21d6ea-de72-4e44-ae0a-9ebe795cadaf
2024-07-04T14:59:06.246Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "operator"}
2024-07-04T14:59:06.283Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "monitor"}
2024-07-04T14:59:06.373Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"}
2024-07-04T14:59:06.409Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"}
2024-07-04T14:59:06.447Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "xtrabackup"}
2024-07-04T14:59:06.499Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774"}
2024-07-04T14:59:06.532Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "f28e6aa0-ab38-4ca0-8989-d4c035381774", "user": "replication"}
2024-07-04T15:01:36.524Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "47c3f584-6025-45bf-bd86-5c1e9bc5153b", "user": "root"}
2024-07-04T15:01:36.729Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "47c3f584-6025-45bf-bd86-5c1e9bc5153b", "new version": "8.0.36-28.1"}
2024-07-04T15:03:52.455Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab"}
2024-07-04T15:03:52.500Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica"}
2024-07-04T15:04:38.623Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica", "backup": ""}
2024-07-04T15:05:13.699Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica"}
{"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab", "cluster": "cross-site-replica"} 2024-07-04T15:06:08.832Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "38c91e79-2d44-464b-b676-e2144d2f0b42", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.31.143:3306: connect: connection refused"} 2024-07-04T15:08:43.953Z INFO You can view xtrabackup log: 2024-07-04T15:09:27.078Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-2"} 2024-07-04T15:09:27.097Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-0"} 2024-07-04T15:09:27.114Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "pod": "cross-site-replica-pxc-1"} 2024-07-04T15:09:27.182Z INFO Replication pod has changed {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "20e500f4-cb77-445f-97c4-a63300ebbfe4", "new replication pod": "cross-site-replica-pxc-0"} 2024-07-04T15:09:28.390Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "a207997c-3c11-45bd-88f7-edf703812251", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:32.719Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "fa585b58-4f05-49ed-ab82-5650b23a154c", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:38.079Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "03cf4612-96ca-4b9d-a411-6243df93b2b1", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:43.452Z INFO Replication for channel is not running. 
Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "4aac024d-e688-46ca-ba1f-021349edfe30", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:49.793Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "774197d2-982e-4b45-85b3-77f50bb87570", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:09:55.372Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "39cac6d0-297a-498e-b0a5-491f37889fac", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:00.808Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "af98d490-af2f-4002-8a13-29255b85145e", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:06.264Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "73d220a7-d8c5-4391-9b1c-f48a9ab82777", "channel": "source_to_replica", "Last_IO_Error": "Error connecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. 
Message: Access denied for user 'replication'@'cross-site-replica-pxc-0.cross-site-replica-pxc.cross-site-repli' (using password: YES)"} 2024-07-04T15:10:11.573Z INFO Password changed, updating user {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.605Z INFO Password updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.616Z INFO MySQL init secret created {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "secret": "cross-site-replica-mysql-init", "user": "replication"} 2024-07-04T15:10:11.626Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.652Z INFO Old password discarded {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "user": "replication"} 2024-07-04T15:10:11.786Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "60c37bfa-c09f-4ca3-a330-cd7bc33918b9", "channel": "source_to_replica", "Last_IO_Error": ""} 2024-07-04T15:10:53.205Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio-replica", "reconcileID": "3e956558-8f49-410d-a3d8-9803866a8cde", "Namespace": "cross-site-replica-28611", "Name": "xb-backup-minio-replica"} 2024-07-04T15:11:26.164Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5"} 2024-07-04T15:11:26.272Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source"} 2024-07-04T15:11:50.978Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "71b5c358-3d5a-4857-9b1b-653812b7ee38", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:11:56.371Z INFO Replication for channel is not running. Please, check the replication status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "11d46050-fc06-4f4d-9a22-900aa7fc7762", "channel": "source_to_replica", "Last_IO_Error": "Error reconnecting to source 'replication@10.215.30.96:3306'. This was attempt 1/0, with a delay of 60 seconds between attempts. Message: Can't connect to MySQL server on '10.215.30.96:3306' (111)"} 2024-07-04T15:12:01.753Z INFO Replication for channel is not running. 
[the "Can't connect ... (111)" record repeats every ~5 s: 15:12:01, 15:12:07, 15:12:12, 15:12:18]
2024-07-04T15:12:23.420Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source", "backup": ""}
[repeats continue: 15:12:24, 15:12:30, 15:12:35]
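The stopping cluster → starting restore → starting cluster sequence is driven by the PerconaXtraDBClusterRestore object named backup-minio. A minimal sketch of such an object (the backupName value is a placeholder, not taken from this log; the record above logs "backup": ""):

kubectl -n cross-site-1974 apply -f - <<EOF
apiVersion: pxc.percona.com/v1
kind: PerconaXtraDBClusterRestore
metadata:
  name: backup-minio
spec:
  pxcCluster: cross-site-source
  backupName: backup-minio-source   # placeholder backup name
EOF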
2024-07-04T15:12:40.480Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5", "cluster": "cross-site-source"}
[the "Can't connect ... (111)" record keeps repeating every ~5 s; the attempt counter reaches 2/0 at 15:12:46]
2024-07-04T15:13:21.997Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "4e88a23d-2757-489e-b523-7f938c2f48e5", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 10.215.19.140:3306: connect: connection refused"}
[repeats continue; the attempt counter reaches 3/0 at 15:13:47 and the record repeats through 15:14:14]
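The "attempt N/0, with a delay of 60 seconds between attempts" wording reflects the channel's connection-retry settings: 60 is the connect-retry interval and the 0 denominator is the configured retry count (0 appearing to be treated as unlimited here, since the counter keeps climbing). Those settings can be read back with a query like this (same connection assumptions as the earlier sketch):

kubectl -n cross-site-replica-28611 exec cross-site-replica-pxc-0 -c pxc -- \
    mysql -uroot -p"$ROOT_PASS" -e \
    "SELECT CHANNEL_NAME, CONNECTION_RETRY_INTERVAL, CONNECTION_RETRY_COUNT
       FROM performance_schema.replication_connection_configuration"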
[the repeats continue through 15:14:41 at attempt 3/0, then from 15:14:47 through 15:15:41 at attempt 4/0, and from 15:15:47 at attempt 5/0]
2024-07-04T15:15:49.676Z INFO You can view xtrabackup log:
[attempt-5/0 repeats continue: 15:15:52, 15:15:58, 15:16:03]
[repeats continue through 15:16:20]
2024-07-04T15:16:24.952Z INFO Remove outdated replication channel {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "e81914c0-105e-403c-9081-46b339aa3486", "channel": "source_to_replica"}
2024-07-04T15:16:24.954Z DEBUG Remove outdated replication source {same labels, "channel": "source_to_replica", "host": "10.215.23.90"}
2024-07-04T15:16:24.955Z DEBUG Remove outdated replication source {same labels, "channel": "source_to_replica", "host": "10.215.26.231"}
2024-07-04T15:16:24.955Z DEBUG Remove outdated replication source {same labels, "channel": "source_to_replica", "host": "10.215.30.96"}
2024-07-04T15:16:24.964Z INFO Primary is readonly. Disabling readonly mode {same labels, "pod": "cross-site-replica-pxc-1"}
2024-07-04T15:16:24.974Z INFO Primary is readonly. Disabling readonly mode {same labels, "pod": "cross-site-replica-pxc-2"}
2024-07-04T15:16:24.984Z INFO Primary is readonly. Disabling readonly mode {same labels, "pod": "cross-site-replica-pxc-0"}
2024-07-04T15:16:30.739Z INFO Replica is not readonly. Enabling readonly mode {"controller": "pxc-controller", "namespace": "cross-site-1974", "name": "cross-site-source", "reconcileID": "22447e05-30c9-4bcf-8373-6c4ab12a2a45", "pod": "cross-site-source-pxc-1"}
2024-07-04T15:16:30.762Z INFO Replica is not readonly. Enabling readonly mode {same labels, "pod": "cross-site-source-pxc-2"}
2024-07-04T15:16:30.779Z INFO Replica is not readonly. Enabling readonly mode {same labels, "pod": "cross-site-source-pxc-0"}
2024-07-04T15:16:30.819Z INFO Replication pod has changed {same labels, "new replication pod": "cross-site-source-pxc-0"}
2024-07-04T15:17:32.764Z ERROR Update status {"controller": "pxc-controller", "namespace": "cross-site-replica-28611", "name": "cross-site-replica", "reconcileID": "87925d44-c7dc-4458-ba66-3549a69ecf9b", "error": "write status: PerconaXtraDBCluster.pxc.percona.com \"cross-site-replica\" not found", "errorVerbose": "PerconaXtraDBCluster.pxc.percona.com \"cross-site-replica\" not found\nwrite status\n[Go stack trace through writeStatus, updateStatus, Reconcile and the controller-runtime worker loop omitted]"}
[two orphaned reconcile-log fragments follow; their message text is lost to interleaving in the original:]
{"controller": "pxcrestore-controller", "namespace": "cross-site-1974", "name": "backup-minio", "reconcileID": "a9256432-7902-4600-8d7b-80b889510be5"}
{"controller": "pxcrestore-controller", "namespace": "cross-site-replica-28611", "name": "backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab"}
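Once the outdated channel is removed, the clusters swap roles: read-only is switched off on the replica cluster's pods and on for the source cluster's pods. Checking the flags by hand would look like this (a sketch; whether the operator manages read_only, super_read_only, or both is an assumption, and $ROOT_PASS would have to be fetched from the source cluster's own secret here):

for pod in cross-site-source-pxc-0 cross-site-source-pxc-1 cross-site-source-pxc-2; do
    kubectl -n cross-site-1974 exec "$pod" -c pxc -- \
        mysql -uroot -p"$ROOT_PASS" -e "SELECT @@hostname, @@read_only, @@super_read_only"
done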
"backup-minio", "reconcileID": "d4b6ceba-fe64-42f6-aff1-3c49180a17ab"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile.func1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:114 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:222 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:261 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:311 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.4/pkg/internal/controller/controller.go:324 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:203 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:455 If everything is fine, you can cleanup the job: $ kubectl delete pxc-restore/backup-minio $ kubectl logs job/restore-job-backup-minio-cross-site-replica $ kubectl logs job/restore-job-backup-minio-cross-site-source sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 + : + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.5q83Nwdqlo ++ mktemp + local LAST_ERR=/tmp/tmp.zbWe6Rd1mj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5q83Nwdqlo No resources found + cat /tmp/tmp.zbWe6Rd1mj + rm /tmp/tmp.5q83Nwdqlo /tmp/tmp.zbWe6Rd1mj + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.Um7iVx3KZ4 ++ mktemp + local LAST_ERR=/tmp/tmp.ncPcINW5C3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Um7iVx3KZ4 No resources found + cat /tmp/tmp.ncPcINW5C3 + rm /tmp/tmp.Um7iVx3KZ4 /tmp/tmp.ncPcINW5C3 + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.2Yy0wPngl8 ++ mktemp + local LAST_ERR=/tmp/tmp.r7RWBOshZl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2Yy0wPngl8 No resources found + cat /tmp/tmp.r7RWBOshZl + rm /tmp/tmp.2Yy0wPngl8 /tmp/tmp.r7RWBOshZl + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.IzKvwc0ypM ++ mktemp + local LAST_ERR=/tmp/tmp.C44nPz36Lf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + 
Error from server (NotFound): validatingwebhookconfigurations.admissionregistration.k8s.io "percona-xtradbcluster-webhook" not found
+ return 1
+ :
+ kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml
namespace "pxc-operator" force deleted
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
[the "force deleted" output above appears interleaved from a concurrent deletion in the original log]
+ return 0
+ :
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace cross-site-replica-28611
+ rm -rf /tmp/tmp.7Me1pw3ROq
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
[the two namespace deletions run concurrently; their xtrace output is interleaved in the original log]
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
[the pxc-operator deletion is attempted three times, each exiting 1]
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
Error from server (NotFound): namespaces "pxc-operator" not found
+ return 1
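For reference, the kubectl_bin traces elided above all follow the same pattern. Reconstructed from the xtrace output, the helper is roughly the retry wrapper below (a sketch inferred from this log, not the suite's actual source):

# Runs kubectl up to three times, capturing stdout/stderr to temp files,
# and replays the captured output of the last attempt.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" = 0 ]; then break; fi
        sleep 0
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}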