Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/logs/security-context-8-0.log WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 + create_infra security-context-6372 + local ns=security-context-6372 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n security-context-27375 sec-context --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/sec-context patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.4TtDNDsARb ++ mktemp + local LAST_ERR=/tmp/tmp.qfPWHufXDS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4TtDNDsARb perconaxtradbcluster.pxc.percona.com "sec-context" deleted + cat /tmp/tmp.qfPWHufXDS + rm /tmp/tmp.4TtDNDsARb /tmp/tmp.qfPWHufXDS + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.9BkerQiEEi ++ mktemp + local LAST_ERR=/tmp/tmp.4sqUN23ssF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9BkerQiEEi No resources found + cat /tmp/tmp.4sqUN23ssF + rm /tmp/tmp.9BkerQiEEi /tmp/tmp.4sqUN23ssF + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.FVbTwTV3Qb ++ mktemp + local LAST_ERR=/tmp/tmp.jHj2q0oPzp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FVbTwTV3Qb No resources found + cat /tmp/tmp.jHj2q0oPzp + rm /tmp/tmp.FVbTwTV3Qb /tmp/tmp.jHj2q0oPzp + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: 
resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' ++ mktemp + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + local LAST_OUT=/tmp/tmp.I0jpEzc0yH ++ mktemp + local LAST_OUT=/tmp/tmp.Uvz544fy9K ++ mktemp + local LAST_ERR=/tmp/tmp.bMb77v79fo + local exit_status=0 + local LAST_ERR=/tmp/tmp.Ps27VUyDY4 + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + awk '{print$1}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Uvz544fy9K + cat /tmp/tmp.Ps27VUyDY4 + rm /tmp/tmp.Uvz544fy9K /tmp/tmp.Ps27VUyDY4 + return 0 namespace "cert-manager" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "security-context-27375" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.I0jpEzc0yH namespace "pxc-operator" deleted + cat /tmp/tmp.bMb77v79fo + rm /tmp/tmp.I0jpEzc0yH /tmp/tmp.bMb77v79fo + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bp7AJvAFpw ++ mktemp + local LAST_ERR=/tmp/tmp.7vqlC4GH93 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bp7AJvAFpw namespace/pxc-operator created + cat /tmp/tmp.7vqlC4GH93 + rm /tmp/tmp.bp7AJvAFpw /tmp/tmp.7vqlC4GH93 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.uW4IrmF24v +++ mktemp ++ local LAST_ERR=/tmp/tmp.dosRWFIs0Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uW4IrmF24v ++ cat /tmp/tmp.dosRWFIs0Y ++ rm /tmp/tmp.uW4IrmF24v /tmp/tmp.dosRWFIs0Y ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.UcZYrHimqx ++ mktemp + local LAST_ERR=/tmp/tmp.o69l9P0ETz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UcZYrHimqx 
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3" modified. + cat /tmp/tmp.o69l9P0ETz + rm /tmp/tmp.UcZYrHimqx /tmp/tmp.o69l9P0ETz + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Aveol7M9y1 ++ mktemp + local LAST_ERR=/tmp/tmp.0LLki2do7d + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Aveol7M9y1 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.0LLki2do7d + rm /tmp/tmp.Aveol7M9y1 /tmp/tmp.0LLki2do7d + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rsonHR1vVw ++ mktemp + local LAST_ERR=/tmp/tmp.BiMvVB934x + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rsonHR1vVw clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.BiMvVB934x + rm /tmp/tmp.rsonHR1vVw /tmp/tmp.BiMvVB934x + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.BWcXZ78dFx + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1774-70b9684b^' ++ mktemp + local LAST_ERR=/tmp/tmp.hsYwYipJta + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BWcXZ78dFx deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.hsYwYipJta + rm /tmp/tmp.BWcXZ78dFx /tmp/tmp.hsYwYipJta + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.32E21LfZic ++ mktemp + local LAST_ERR=/tmp/tmp.jJRGMhwFxu + local exit_status=0 ++ 
seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.32E21LfZic pod/percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb condition met + cat /tmp/tmp.jJRGMhwFxu + rm /tmp/tmp.32E21LfZic /tmp/tmp.jJRGMhwFxu + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.unuIf2xZ87 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MqbDzsu95h ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.unuIf2xZ87 ++ cat /tmp/tmp.MqbDzsu95h ++ rm /tmp/tmp.unuIf2xZ87 /tmp/tmp.MqbDzsu95h ++ return 0 + wait_pod percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb 480 pxc-operator + local pod=percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb condition met percona-xtradb-cluster-operator-56bc5d9fb9-vn5wb.Ok + sleep 3 + create_namespace security-context-6372 + local namespace=security-context-6372 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces security-context-6372' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces security-context-6372 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace security-context-6372 + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.YeArY12zbW + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.TZJ2XzkaCL ++ mktemp + local LAST_ERR=/tmp/tmp.QDV1PMy0Gd + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.C3dZsdywNK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + awk '{print$1}' + for i in '$(seq 0 2)' + set +e + kubectl delete namespace security-context-6372 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace security-context-6372 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YeArY12zbW + cat /tmp/tmp.QDV1PMy0Gd + rm /tmp/tmp.YeArY12zbW /tmp/tmp.QDV1PMy0Gd + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace security-context-6372 namespace "gmp-public" deleted namespace "gmp-system" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.TZJ2XzkaCL + cat /tmp/tmp.C3dZsdywNK Error from server (NotFound): namespaces "security-context-6372" not found + rm /tmp/tmp.TZJ2XzkaCL /tmp/tmp.C3dZsdywNK + return 1 + : + wait_for_delete namespace/security-context-6372 + local res=namespace/security-context-6372 + echo -n 'namespace/security-context-6372 - ' namespace/security-context-6372 - + set +o xtrace Error from server (NotFound): namespaces "security-context-6372" not found + desc 'create namespace security-context-6372' + set +o xtrace ----------------------------------------------------------------------------------- create namespace security-context-6372 ----------------------------------------------------------------------------------- + kubectl_bin create namespace security-context-6372 ++ mktemp + local LAST_OUT=/tmp/tmp.4DN9pF9ZI8 ++ mktemp + local LAST_ERR=/tmp/tmp.YvWWIIyowq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace security-context-6372 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4DN9pF9ZI8 namespace/security-context-6372 created + cat /tmp/tmp.YvWWIIyowq + rm /tmp/tmp.4DN9pF9ZI8 /tmp/tmp.YvWWIIyowq + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.MV0tsCyzjA +++ mktemp ++ local LAST_ERR=/tmp/tmp.lfyCDrVFTK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MV0tsCyzjA ++ cat /tmp/tmp.lfyCDrVFTK ++ rm /tmp/tmp.MV0tsCyzjA /tmp/tmp.lfyCDrVFTK ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=security-context-6372 ++ mktemp + local LAST_OUT=/tmp/tmp.o1qZDyCnMj ++ mktemp + local LAST_ERR=/tmp/tmp.GicZOokVU1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + 
kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=security-context-6372 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.o1qZDyCnMj Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3" modified. + cat /tmp/tmp.GicZOokVU1 + rm /tmp/tmp.o1qZDyCnMj /tmp/tmp.GicZOokVU1 + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Y9N0HHxKSX ++ mktemp + local LAST_ERR=/tmp/tmp.8AKIkgNVsk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Y9N0HHxKSX secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.8AKIkgNVsk + rm /tmp/tmp.Y9N0HHxKSX /tmp/tmp.8AKIkgNVsk + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.33oQdjg8ST ++ mktemp + local LAST_ERR=/tmp/tmp.sV3ycHwcSU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.33oQdjg8ST namespace/cert-manager created + cat /tmp/tmp.sV3ycHwcSU + rm /tmp/tmp.33oQdjg8ST /tmp/tmp.sV3ycHwcSU + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.m6cC2W9w8c ++ mktemp + local LAST_ERR=/tmp/tmp.qvB3zBSCJf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.m6cC2W9w8c namespace/cert-manager labeled + cat /tmp/tmp.qvB3zBSCJf + rm /tmp/tmp.m6cC2W9w8c /tmp/tmp.qvB3zBSCJf + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.jgdJKOaOYl ++ mktemp + local LAST_ERR=/tmp/tmp.Rx27x7w3Lw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jgdJKOaOYl namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io 
unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.Rx27x7w3Lw Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
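Nearly every step traced in this log runs through the suite's kubectl_bin wrapper: stdout and stderr are redirected to a pair of mktemp files (the LAST_OUT=/tmp/tmp.* and LAST_ERR=/tmp/tmp.* assignments seen throughout), the command is attempted up to three times, and the captured output is printed and the temp files removed once the loop exits. A minimal sketch of that pattern, reconstructed from the trace alone (the real helper in e2e-tests/functions carries extra checks, and the inter-attempt delay here is an assumption based on the literal "sleep 0" shown between failed attempts):

kubectl_bin() {
    local LAST_OUT LAST_ERR
    local exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                      # up to three attempts, as in the trace
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 0                              # trace shows a zero-second pause before retrying
        else
            break                                # success: stop retrying
        fi
    done
    cat "$LAST_OUT"                              # surface captured stdout
    cat "$LAST_ERR"                              # surface captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Reading the trace with this shape in mind makes the retries easy to follow: the kubectl wait for monitoring pods later in the log exhausts all three attempts and returns 1, while most other calls break out of the loop on the first pass.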
+ rm /tmp/tmp.jgdJKOaOYl /tmp/tmp.Rx27x7w3Lw + return 0 + '[' '' == 4.10 ']' + sleep 70 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/service-account.yml ++ mktemp + local LAST_OUT=/tmp/tmp.nWj8qw5EnE ++ mktemp + local LAST_ERR=/tmp/tmp.50AXsgZrOi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/service-account.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nWj8qw5EnE serviceaccount/percona-xtradb-cluster-operator-workload created + cat /tmp/tmp.50AXsgZrOi + rm /tmp/tmp.nWj8qw5EnE /tmp/tmp.50AXsgZrOi + return 0 + [[ -n '' ]] + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + cluster=sec-context + spinup_pxc sec-context /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context.yml 3 10 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/secrets_without_tls.yml + local cluster=sec-context + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.iRHROexbtQ ++ mktemp + local LAST_ERR=/tmp/tmp.nzG8pRpwWB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iRHROexbtQ secret/my-cluster-secrets created + cat /tmp/tmp.nzG8pRpwWB + rm /tmp/tmp.iRHROexbtQ /tmp/tmp.nzG8pRpwWB + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/client.yml + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.dfIHrxYpq7 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed 
-e s~minio-service.#namespace~minio-service.security-context-6372~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1774-70b9684b#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/client.yml + local LAST_ERR=/tmp/tmp.GfS4LlvF4D + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dfIHrxYpq7 deployment.apps/pxc-client created + cat /tmp/tmp.GfS4LlvF4D + rm /tmp/tmp.dfIHrxYpq7 /tmp/tmp.GfS4LlvF4D + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context.yml + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_OUT=/tmp/tmp.l9OD3ZUnt1 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1774-70b9684b#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-6372~ + local LAST_ERR=/tmp/tmp.JbBroH1kfB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.l9OD3ZUnt1 perconaxtradbcluster.pxc.percona.com/sec-context created + cat /tmp/tmp.JbBroH1kfB + rm /tmp/tmp.l9OD3ZUnt1 /tmp/tmp.JbBroH1kfB + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy sec-context ++ local target_cluster=sec-context +++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T7fs9RZrHA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WZAhyqGhu6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.T7fs9RZrHA +++ cat /tmp/tmp.WZAhyqGhu6 +++ rm /tmp/tmp.T7fs9RZrHA /tmp/tmp.WZAhyqGhu6 +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rYSN9DwSpS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fMKzMZKkiy +++ 
local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.rYSN9DwSpS +++ cat /tmp/tmp.fMKzMZKkiy +++ rm /tmp/tmp.rYSN9DwSpS /tmp/tmp.fMKzMZKkiy +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo sec-context-proxysql ++ return + local proxy=sec-context-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n security-context-6372 ++ mktemp + local LAST_OUT=/tmp/tmp.IIbFO9D7Jh ++ mktemp + local LAST_ERR=/tmp/tmp.p8tIpT0x4X + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n security-context-6372 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n security-context-6372 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n security-context-6372 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.IIbFO9D7Jh + cat /tmp/tmp.p8tIpT0x4X error: no matching resources found + rm /tmp/tmp.IIbFO9D7Jh /tmp/tmp.p8tIpT0x4X + return 1 + true + wait_for_running sec-context-proxysql 1 + local name=sec-context-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-proxysql-0 480 + local pod=sec-context-proxysql-0 + local max_retry=480 + local ns= ++ echo sec-context-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace Error from server (NotFound): pods "sec-context-proxysql-0" not found sec-context-proxysql-0.........Ok + wait_for_running sec-context-pxc 3 + local name=sec-context-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-pxc-0 480 + local pod=sec-context-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo sec-context-pxc-0 + local container=pxc + set +o xtrace pod/sec-context-pxc-0 condition met sec-context-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-pxc-1 480 + local pod=sec-context-pxc-1 + local max_retry=480 + local ns= ++ echo sec-context-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/sec-context-pxc-1 condition met sec-context-pxc-1.Ok + for i in '$(seq 0 
$last_pod)' + wait_pod sec-context-pxc-2 480 + local pod=sec-context-pxc-2 + local max_retry=480 + local ns= ++ echo sec-context-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/sec-context-pxc-2 condition met sec-context-pxc-2.Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h sec-context-proxysql -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h sec-context-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kf2YlsH5MR +++ mktemp ++ local LAST_ERR=/tmp/tmp.he8JCIDFMs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kf2YlsH5MR ++ cat /tmp/tmp.he8JCIDFMs ++ rm /tmp/tmp.kf2YlsH5MR /tmp/tmp.he8JCIDFMs ++ return 0 + client_pod=pxc-client-65c795cbdf-tz78g + wait_pod pxc-client-65c795cbdf-tz78g + local pod=pxc-client-65c795cbdf-tz78g + local max_retry=480 + local ns= ++ echo pxc-client-65c795cbdf-tz78g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c795cbdf-tz78g condition met pxc-client-65c795cbdf-tz78g.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h sec-context-proxysql -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h sec-context-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pbkZ7lr5tV +++ mktemp ++ local LAST_ERR=/tmp/tmp.XGLkELNTtr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pbkZ7lr5tV ++ cat /tmp/tmp.XGLkELNTtr ++ rm /tmp/tmp.pbkZ7lr5tV /tmp/tmp.XGLkELNTtr ++ return 0 + client_pod=pxc-client-65c795cbdf-tz78g + wait_pod pxc-client-65c795cbdf-tz78g + local pod=pxc-client-65c795cbdf-tz78g + local max_retry=480 + local ns= ++ echo pxc-client-65c795cbdf-tz78g ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-65c795cbdf-tz78g condition met pxc-client-65c795cbdf-tz78g.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql + [[ 
perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dow2pQGK2k +++ mktemp ++ local LAST_ERR=/tmp/tmp.LDpkni5T93 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Dow2pQGK2k ++ cat /tmp/tmp.LDpkni5T93 ++ rm /tmp/tmp.Dow2pQGK2k /tmp/tmp.LDpkni5T93 ++ return 0 + client_pod=pxc-client-65c795cbdf-tz78g + wait_pod pxc-client-65c795cbdf-tz78g + local pod=pxc-client-65c795cbdf-tz78g + local max_retry=480 + local ns= ++ echo pxc-client-65c795cbdf-tz78g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c795cbdf-tz78g condition met pxc-client-65c795cbdf-tz78g.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.ohz7RI8nVC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.ohz7RI8nVC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b1v02I8G1y +++ mktemp ++ local LAST_ERR=/tmp/tmp.dy65rXgk4F ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.b1v02I8G1y ++ cat /tmp/tmp.dy65rXgk4F ++ rm /tmp/tmp.b1v02I8G1y /tmp/tmp.dy65rXgk4F ++ return 0 + client_pod=pxc-client-65c795cbdf-tz78g + wait_pod pxc-client-65c795cbdf-tz78g + local pod=pxc-client-65c795cbdf-tz78g + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-65c795cbdf-tz78g + local container= + set +o xtrace pod/pxc-client-65c795cbdf-tz78g condition met pxc-client-65c795cbdf-tz78g.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.ohz7RI8nVC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.ohz7RI8nVC/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ze2MtTB16i +++ mktemp ++ local LAST_ERR=/tmp/tmp.sOXUdd9P0U ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ze2MtTB16i ++ cat /tmp/tmp.sOXUdd9P0U ++ rm /tmp/tmp.Ze2MtTB16i /tmp/tmp.sOXUdd9P0U ++ return 0 + client_pod=pxc-client-65c795cbdf-tz78g + wait_pod pxc-client-65c795cbdf-tz78g + local pod=pxc-client-65c795cbdf-tz78g + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-65c795cbdf-tz78g ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-65c795cbdf-tz78g condition met pxc-client-65c795cbdf-tz78g.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.ohz7RI8nVC/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.ohz7RI8nVC/select-1.sql ++ is_keyring_plugin_in_use sec-context ++ local cluster=sec-context ++ kubectl_bin exec -it sec-context-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MZ4DYTEVkT +++ mktemp ++ local LAST_ERR=/tmp/tmp.hquvjKemiT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it sec-context-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MZ4DYTEVkT ++ cat /tmp/tmp.hquvjKemiT Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.MZ4DYTEVkT /tmp/tmp.hquvjKemiT ++ return 0 + '[' '' ']' + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/sec-context-pxc + local resource=statefulset/sec-context-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc.yml + local new_result=/tmp/tmp.ohz7RI8nVC/statefulset_sec-context-pxc.yml + desc 'compare statefulset/sec-context-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/sec-context-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.27 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/sec-context-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.VmBlJNqiwz ++ mktemp + local LAST_ERR=/tmp/tmp.Sab88AZfle + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/sec-context-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VmBlJNqiwz + cat /tmp/tmp.Sab88AZfle + rm /tmp/tmp.VmBlJNqiwz /tmp/tmp.Sab88AZfle + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-k127.yml /tmp/tmp.ohz7RI8nVC/statefulset_sec-context-pxc.yml + compare_kubectl statefulset/sec-context-proxysql + local resource=statefulset/sec-context-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql.yml + local new_result=/tmp/tmp.ohz7RI8nVC/statefulset_sec-context-proxysql.yml + desc 'compare statefulset/sec-context-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/sec-context-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.27 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/sec-context-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.D0hx6TKFFy ++ mktemp + local LAST_ERR=/tmp/tmp.hHj5bV27FL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/sec-context-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D0hx6TKFFy + cat /tmp/tmp.hHj5bV27FL + rm /tmp/tmp.D0hx6TKFFy /tmp/tmp.hHj5bV27FL + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-k127.yml /tmp/tmp.ohz7RI8nVC/statefulset_sec-context-proxysql.yml + desc 'change security context in PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- change security context in PXC cluster ----------------------------------------------------------------------------------- + pfx=-changes + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-changes.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-changes.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-changes.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_OUT=/tmp/tmp.inwQVgL4Bh + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-6372~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_ERR=/tmp/tmp.eXxl4uFAjn + local exit_status=0 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1774-70b9684b#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.inwQVgL4Bh perconaxtradbcluster.pxc.percona.com/sec-context configured + cat /tmp/tmp.eXxl4uFAjn + rm /tmp/tmp.inwQVgL4Bh /tmp/tmp.eXxl4uFAjn + return 0 + sleep 30 + desc 'check if service and statefulset changed to expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset changed to expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/sec-context-pxc -changes + local resource=statefulset/sec-context-pxc + local postfix=-changes + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes.yml + local new_result=/tmp/tmp.ohz7RI8nVC/statefulset_sec-context-pxc.yml + desc 'compare 
statefulset/sec-context-pxc--changes' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/sec-context-pxc--changes ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/sec-context-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. 
| select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.gZkwsrkpGj ++ mktemp + local LAST_ERR=/tmp/tmp.5I6sSGaEgs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/sec-context-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gZkwsrkpGj + cat /tmp/tmp.5I6sSGaEgs + rm /tmp/tmp.gZkwsrkpGj /tmp/tmp.5I6sSGaEgs + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-k127.yml /tmp/tmp.ohz7RI8nVC/statefulset_sec-context-pxc.yml + compare_kubectl statefulset/sec-context-proxysql -changes + local resource=statefulset/sec-context-proxysql + local postfix=-changes + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes.yml + local new_result=/tmp/tmp.ohz7RI8nVC/statefulset_sec-context-proxysql.yml + desc 'compare statefulset/sec-context-proxysql--changes' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/sec-context-proxysql--changes ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-k127-eks.yml ']' + kubectl_bin get -o yaml statefulset/sec-context-proxysql ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.ifTocZBL7d ++ mktemp + local LAST_ERR=/tmp/tmp.W9YDquqniS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/sec-context-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ifTocZBL7d + cat /tmp/tmp.W9YDquqniS + rm /tmp/tmp.ifTocZBL7d /tmp/tmp.W9YDquqniS + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-k127.yml /tmp/tmp.ohz7RI8nVC/statefulset_sec-context-proxysql.yml + wait_cluster_consistency sec-context 3 2 + local cluster_name=sec-context + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HXwRzU7KJs +++ mktemp ++ local LAST_ERR=/tmp/tmp.WhFKGVi61J ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.HXwRzU7KJs ++ cat /tmp/tmp.WhFKGVi61J ++ rm /tmp/tmp.HXwRzU7KJs /tmp/tmp.WhFKGVi61J ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Halz1YhvNZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.I329KHNikj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Halz1YhvNZ ++ cat /tmp/tmp.I329KHNikj ++ rm /tmp/tmp.Halz1YhvNZ /tmp/tmp.I329KHNikj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CZnUSNEVgi +++ mktemp ++ local LAST_ERR=/tmp/tmp.97nTJPhPca ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CZnUSNEVgi ++ cat /tmp/tmp.97nTJPhPca ++ rm /tmp/tmp.CZnUSNEVgi /tmp/tmp.97nTJPhPca ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P52OlenS0p +++ mktemp ++ local LAST_ERR=/tmp/tmp.E61XDVKxf6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.P52OlenS0p ++ cat /tmp/tmp.E61XDVKxf6 ++ rm /tmp/tmp.P52OlenS0p /tmp/tmp.E61XDVKxf6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.wOC54hXE2w +++ mktemp ++ local LAST_ERR=/tmp/tmp.deSxUTu74U ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wOC54hXE2w ++ cat /tmp/tmp.deSxUTu74U ++ rm /tmp/tmp.wOC54hXE2w /tmp/tmp.deSxUTu74U ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oQ73X1nGzu +++ mktemp ++ local LAST_ERR=/tmp/tmp.NJg0MdX4gp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oQ73X1nGzu ++ cat /tmp/tmp.NJg0MdX4gp ++ rm /tmp/tmp.oQ73X1nGzu /tmp/tmp.NJg0MdX4gp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tRuCXS1rnw +++ mktemp ++ local LAST_ERR=/tmp/tmp.QW8vAtTDBn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tRuCXS1rnw ++ cat /tmp/tmp.QW8vAtTDBn ++ rm /tmp/tmp.tRuCXS1rnw /tmp/tmp.QW8vAtTDBn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R0QsC6jQmt +++ mktemp ++ local LAST_ERR=/tmp/tmp.jm88WSGxys ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.R0QsC6jQmt ++ cat /tmp/tmp.jm88WSGxys ++ rm /tmp/tmp.R0QsC6jQmt /tmp/tmp.jm88WSGxys ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d3D8aFNSFS +++ mktemp ++ local LAST_ERR=/tmp/tmp.faQzUwkK2V ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.d3D8aFNSFS ++ cat /tmp/tmp.faQzUwkK2V ++ rm /tmp/tmp.d3D8aFNSFS /tmp/tmp.faQzUwkK2V ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 8 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ht0X3L3bYW +++ mktemp ++ local LAST_ERR=/tmp/tmp.ws0jmU23X8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ht0X3L3bYW ++ cat /tmp/tmp.ws0jmU23X8 ++ rm /tmp/tmp.ht0X3L3bYW /tmp/tmp.ws0jmU23X8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 9 -ge 36 ]] + let i+=1 ++ 
kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ACwzHucrGO +++ mktemp ++ local LAST_ERR=/tmp/tmp.SkvnMPTpgw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ACwzHucrGO ++ cat /tmp/tmp.SkvnMPTpgw ++ rm /tmp/tmp.ACwzHucrGO /tmp/tmp.SkvnMPTpgw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 10 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5QiQIipYgY +++ mktemp ++ local LAST_ERR=/tmp/tmp.uate4kdXA7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5QiQIipYgY ++ cat /tmp/tmp.uate4kdXA7 ++ rm /tmp/tmp.5QiQIipYgY /tmp/tmp.uate4kdXA7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 11 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Kfnah2HfA +++ mktemp ++ local LAST_ERR=/tmp/tmp.z67gycRiXu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2Kfnah2HfA ++ cat /tmp/tmp.z67gycRiXu ++ rm /tmp/tmp.2Kfnah2HfA /tmp/tmp.z67gycRiXu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 12 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rhgl7z6Gzc +++ mktemp ++ local LAST_ERR=/tmp/tmp.7zHd7r4zaD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Rhgl7z6Gzc ++ cat /tmp/tmp.7zHd7r4zaD ++ rm /tmp/tmp.Rhgl7z6Gzc /tmp/tmp.7zHd7r4zaD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 13 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pSKbmtqgmj +++ mktemp ++ local LAST_ERR=/tmp/tmp.0Ca3L9Hz7r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pSKbmtqgmj ++ cat /tmp/tmp.0Ca3L9Hz7r ++ rm /tmp/tmp.pSKbmtqgmj /tmp/tmp.0Ca3L9Hz7r ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 14 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AxmCoMxXhB +++ mktemp ++ local LAST_ERR=/tmp/tmp.hjprkNDf7Z ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AxmCoMxXhB ++ cat /tmp/tmp.hjprkNDf7Z ++ rm /tmp/tmp.AxmCoMxXhB /tmp/tmp.hjprkNDf7Z ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster 
readyness' waiting for cluster readyness + sleep 20 + [[ 15 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.auuQdeM8lH +++ mktemp ++ local LAST_ERR=/tmp/tmp.GEAI1Fu38Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.auuQdeM8lH ++ cat /tmp/tmp.GEAI1Fu38Y ++ rm /tmp/tmp.auuQdeM8lH /tmp/tmp.GEAI1Fu38Y ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 16 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uYglB6kKsu +++ mktemp ++ local LAST_ERR=/tmp/tmp.GqsDc6UcV7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uYglB6kKsu ++ cat /tmp/tmp.GqsDc6UcV7 ++ rm /tmp/tmp.uYglB6kKsu /tmp/tmp.GqsDc6UcV7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 17 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g2NJo6OwpA +++ mktemp ++ local LAST_ERR=/tmp/tmp.SMZm9HMzQI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.g2NJo6OwpA ++ cat /tmp/tmp.SMZm9HMzQI ++ rm /tmp/tmp.g2NJo6OwpA /tmp/tmp.SMZm9HMzQI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 18 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v8HFDRFzSP +++ mktemp ++ local LAST_ERR=/tmp/tmp.K9IBer5lzU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.v8HFDRFzSP ++ cat /tmp/tmp.K9IBer5lzU ++ rm /tmp/tmp.v8HFDRFzSP /tmp/tmp.K9IBer5lzU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 19 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bovQDDHecO +++ mktemp ++ local LAST_ERR=/tmp/tmp.IKTwN4BA2n ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bovQDDHecO ++ cat /tmp/tmp.IKTwN4BA2n ++ rm /tmp/tmp.bovQDDHecO /tmp/tmp.IKTwN4BA2n ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 20 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qH4cl7YDfF +++ mktemp ++ local LAST_ERR=/tmp/tmp.LuP0fokCFz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qH4cl7YDfF ++ cat /tmp/tmp.LuP0fokCFz ++ rm /tmp/tmp.qH4cl7YDfF 
/tmp/tmp.LuP0fokCFz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 21 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0QQcSjiGQC +++ mktemp ++ local LAST_ERR=/tmp/tmp.weVqF28e9W ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0QQcSjiGQC ++ cat /tmp/tmp.weVqF28e9W ++ rm /tmp/tmp.0QQcSjiGQC /tmp/tmp.weVqF28e9W ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 22 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D2SEDbxpDx +++ mktemp ++ local LAST_ERR=/tmp/tmp.LQNsq1IO5R ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.D2SEDbxpDx ++ cat /tmp/tmp.LQNsq1IO5R ++ rm /tmp/tmp.D2SEDbxpDx /tmp/tmp.LQNsq1IO5R ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 23 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EY0ywo2MZ8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ujqv8x7FTe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EY0ywo2MZ8 ++ cat /tmp/tmp.ujqv8x7FTe ++ rm /tmp/tmp.EY0ywo2MZ8 /tmp/tmp.ujqv8x7FTe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 24 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RuKfsKcF7h +++ mktemp ++ local LAST_ERR=/tmp/tmp.NGlkgKqfm5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RuKfsKcF7h ++ cat /tmp/tmp.NGlkgKqfm5 ++ rm /tmp/tmp.RuKfsKcF7h /tmp/tmp.NGlkgKqfm5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 25 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QY4ItkZudm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rw9bgEuFPE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QY4ItkZudm ++ cat /tmp/tmp.Rw9bgEuFPE ++ rm /tmp/tmp.QY4ItkZudm /tmp/tmp.Rw9bgEuFPE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 26 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q152tJgwqS +++ mktemp ++ local LAST_ERR=/tmp/tmp.8Y2JiyxiOX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ 
break ++ cat /tmp/tmp.q152tJgwqS ++ cat /tmp/tmp.8Y2JiyxiOX ++ rm /tmp/tmp.q152tJgwqS /tmp/tmp.8Y2JiyxiOX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 27 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OYHCttXXus +++ mktemp ++ local LAST_ERR=/tmp/tmp.DlPY9gYscP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OYHCttXXus ++ cat /tmp/tmp.DlPY9gYscP ++ rm /tmp/tmp.OYHCttXXus /tmp/tmp.DlPY9gYscP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 28 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FkXNDS8QhF +++ mktemp ++ local LAST_ERR=/tmp/tmp.T2IgbquuBB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FkXNDS8QhF ++ cat /tmp/tmp.T2IgbquuBB ++ rm /tmp/tmp.FkXNDS8QhF /tmp/tmp.T2IgbquuBB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 29 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0jkhfAEQEE +++ mktemp ++ local LAST_ERR=/tmp/tmp.dbVkkBtsRn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0jkhfAEQEE ++ cat /tmp/tmp.dbVkkBtsRn ++ rm /tmp/tmp.0jkhfAEQEE /tmp/tmp.dbVkkBtsRn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 30 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kE2EREy6FH +++ mktemp ++ local LAST_ERR=/tmp/tmp.8JIAr5M0OB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kE2EREy6FH ++ cat /tmp/tmp.8JIAr5M0OB ++ rm /tmp/tmp.kE2EREy6FH /tmp/tmp.8JIAr5M0OB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 31 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WJIAtefkmk +++ mktemp ++ local LAST_ERR=/tmp/tmp.oC656tcCeI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WJIAtefkmk ++ cat /tmp/tmp.oC656tcCeI ++ rm /tmp/tmp.WJIAtefkmk /tmp/tmp.oC656tcCeI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 32 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6bxcvZV4wN +++ mktemp ++ local LAST_ERR=/tmp/tmp.q5EazAqx5j ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc 
sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6bxcvZV4wN ++ cat /tmp/tmp.q5EazAqx5j ++ rm /tmp/tmp.6bxcvZV4wN /tmp/tmp.q5EazAqx5j ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 33 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HZdGEt1VpK +++ mktemp ++ local LAST_ERR=/tmp/tmp.BbAnB0qFrB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.HZdGEt1VpK ++ cat /tmp/tmp.BbAnB0qFrB ++ rm /tmp/tmp.HZdGEt1VpK /tmp/tmp.BbAnB0qFrB ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K4N9ynclHg +++ mktemp ++ local LAST_ERR=/tmp/tmp.2tbA5O4hy1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.K4N9ynclHg ++ cat /tmp/tmp.2tbA5O4hy1 ++ rm /tmp/tmp.K4N9ynclHg /tmp/tmp.2tbA5O4hy1 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine sec-context +++ local cluster_name=sec-context ++++ get_proxy sec-context ++++ local target_cluster=sec-context +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Pxc2Yv8vEF ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.z9E1T7swxN +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.Pxc2Yv8vEF +++++ cat /tmp/tmp.z9E1T7swxN +++++ rm /tmp/tmp.Pxc2Yv8vEF /tmp/tmp.z9E1T7swxN +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.NoQFOWnEPD ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.96WjXMQXuE +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.NoQFOWnEPD +++++ cat /tmp/tmp.96WjXMQXuE +++++ rm /tmp/tmp.NoQFOWnEPD /tmp/tmp.96WjXMQXuE +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo sec-context-proxysql ++++ return +++ local cluster_proxy=sec-context-proxysql +++ echo proxysql ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tQY8vmLoA9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rwEJVziOm3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tQY8vmLoA9 ++ cat /tmp/tmp.rwEJVziOm3 ++ rm /tmp/tmp.tQY8vmLoA9 /tmp/tmp.rwEJVziOm3 ++ return 0 + [[ 2 == \2 ]] + desc 'run pvc backup' + set +o xtrace ----------------------------------------------------------------------------------- run pvc backup ----------------------------------------------------------------------------------- + backup=on-demand-backup-pvc + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-on-demand-backup-pvc.yml ++ mktemp + local LAST_OUT=/tmp/tmp.dhdv3WY705 ++ mktemp + local LAST_ERR=/tmp/tmp.aYCee8ENLq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-on-demand-backup-pvc.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dhdv3WY705 perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-pvc created + cat /tmp/tmp.aYCee8ENLq + rm /tmp/tmp.dhdv3WY705 /tmp/tmp.aYCee8ENLq + return 0 + wait_backup on-demand-backup-pvc + local backup=on-demand-backup-pvc + local status=Succeeded + set +o xtrace on-demand-backup-pvc.........................Succeeded + compare_kubectl job.batch/xb-on-demand-backup-pvc + local resource=job.batch/xb-on-demand-backup-pvc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc.yml + local new_result=/tmp/tmp.ohz7RI8nVC/job.batch_xb-on-demand-backup-pvc.yml + desc 'compare job.batch/xb-on-demand-backup-pvc-' + set +o xtrace ----------------------------------------------------------------------------------- compare job.batch/xb-on-demand-backup-pvc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired 
----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-eks.yml ']' + kubectl_bin get -o yaml job.batch/xb-on-demand-backup-pvc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.TB9WnBZbql ++ mktemp + local LAST_ERR=/tmp/tmp.a6r6hUzrlw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml job.batch/xb-on-demand-backup-pvc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TB9WnBZbql + cat /tmp/tmp.a6r6hUzrlw + rm /tmp/tmp.TB9WnBZbql /tmp/tmp.a6r6hUzrlw + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc.yml /tmp/tmp.ohz7RI8nVC/job.batch_xb-on-demand-backup-pvc.yml ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.iFfNekmkVY +++ mktemp ++ local LAST_ERR=/tmp/tmp.wi6o36bJjN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iFfNekmkVY ++ cat /tmp/tmp.wi6o36bJjN ++ rm /tmp/tmp.iFfNekmkVY /tmp/tmp.wi6o36bJjN ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=security-context-6372 ++ mktemp + local LAST_OUT=/tmp/tmp.uPZNIttDxb ++ mktemp + local LAST_ERR=/tmp/tmp.oIP3VO4048 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3 --namespace=security-context-6372 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uPZNIttDxb Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1774-70b9684b-1-cluster3" modified. 
+ cat /tmp/tmp.oIP3VO4048 + rm /tmp/tmp.uPZNIttDxb /tmp/tmp.oIP3VO4048 + return 0 + desc 'run pvc restore' + set +o xtrace ----------------------------------------------------------------------------------- run pvc restore ----------------------------------------------------------------------------------- + restore=restore-pvc + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-restore-pvc.yml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-6372~ + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.QcCxtQH3N9 ++ mktemp + local LAST_ERR=/tmp/tmp.E8GVhSLgdH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QcCxtQH3N9 perconaxtradbclusterrestore.pxc.percona.com/restore-pvc created + cat /tmp/tmp.E8GVhSLgdH + rm /tmp/tmp.QcCxtQH3N9 /tmp/tmp.E8GVhSLgdH + return 0 + wait_pod restore-src-restore-pvc-sec-context + local pod=restore-src-restore-pvc-sec-context + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo restore-src-restore-pvc-sec-context ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace Error from server (NotFound): pods "restore-src-restore-pvc-sec-context" not found restore-src-restore-pvc-sec-context...................Ok + kubectl_bin get -o yaml pod/restore-src-restore-pvc-sec-context ++ mktemp + local LAST_OUT=/tmp/tmp.BS4iCp9HH4 ++ mktemp + local LAST_ERR=/tmp/tmp.lDSmYecEWo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml pod/restore-src-restore-pvc-sec-context + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BS4iCp9HH4 apiVersion: v1 kind: Pod metadata: annotations: openshift.io/scc: privileged creationTimestamp: "2024-08-02T15:16:33Z" labels: name: restore-src-restore-pvc-sec-context name: restore-src-restore-pvc-sec-context namespace: security-context-6372 ownerReferences: - apiVersion: pxc.percona.com/v1 controller: true kind: PerconaXtraDBClusterRestore name: restore-pvc uid: 277445a1-9020-48cd-ad8d-05ddcad31b75 resourceVersion: "53204" uid: 0afe33cf-9d1d-4182-b7b2-355c7d805492 spec: containers: - command: - recovery-pvc-donor.sh image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup imagePullPolicy: Always name: ncat resources: {} securityContext: privileged: true terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /backup name: backup - mountPath: /etc/mysql/ssl name: ssl - mountPath: /etc/mysql/ssl-internal name: ssl-internal - mountPath: /etc/mysql/vault-keyring-secret name: vault-keyring-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-jm7dn readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: gke-jen-pxc-1774-70b9684-default-pool-1977acc3-clcc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1001 supplementalGroups: - 1001 - 1002 - 1003 serviceAccount: percona-xtradb-cluster-operator-workload serviceAccountName: percona-xtradb-cluster-operator-workload terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: backup persistentVolumeClaim: claimName: xb-on-demand-backup-pvc - name: 
ssl-internal secret: defaultMode: 420 optional: true secretName: some-name-ssl-internal - name: ssl secret: defaultMode: 420 optional: false secretName: some-name-ssl - name: vault-keyring-secret secret: defaultMode: 420 optional: true secretName: sec-context-vault - name: kube-api-access-jm7dn projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2024-08-02T15:16:33Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2024-08-02T15:16:43Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2024-08-02T15:16:43Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2024-08-02T15:16:33Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://b0b663b5f209eedd90753427a845da1216a3de71bd3210d929672138d1491d84 image: docker.io/perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup imageID: docker.io/perconalab/percona-xtradb-cluster-operator@sha256:4e15000cdfe2709166c489f2785746db67ad183264e766d574d8025e37edbe90 lastState: {} name: ncat ready: true restartCount: 0 started: true state: running: startedAt: "2024-08-02T15:16:42Z" hostIP: 10.210.0.38 phase: Running podIP: 10.55.16.6 podIPs: - ip: 10.55.16.6 qosClass: BestEffort startTime: "2024-08-02T15:16:33Z" + cat /tmp/tmp.lDSmYecEWo + rm /tmp/tmp.BS4iCp9HH4 /tmp/tmp.lDSmYecEWo + return 0 + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + compare_kubectl pod/restore-src-restore-pvc-sec-context + local resource=pod/restore-src-restore-pvc-sec-context + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context.yml + local new_result=/tmp/tmp.ohz7RI8nVC/pod_restore-src-restore-pvc-sec-context.yml + desc 'compare pod/restore-src-restore-pvc-sec-context-' + set +o xtrace ----------------------------------------------------------------------------------- compare pod/restore-src-restore-pvc-sec-context- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o 
xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml pod/restore-src-restore-pvc-sec-context ++ mktemp + local LAST_OUT=/tmp/tmp.wP5DLrSQ1L ++ mktemp + local LAST_ERR=/tmp/tmp.ZnzSKJ5Awy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml pod/restore-src-restore-pvc-sec-context + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wP5DLrSQ1L + cat /tmp/tmp.ZnzSKJ5Awy + rm /tmp/tmp.wP5DLrSQ1L /tmp/tmp.ZnzSKJ5Awy + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context.yml /tmp/tmp.ohz7RI8nVC/pod_restore-src-restore-pvc-sec-context.yml + wait_backup_restore restore-pvc + local backup_name=restore-pvc + local status=Succeeded + local wait_time=720 + desc 'wait backup restore' + set +o xtrace ----------------------------------------------------------------------------------- wait backup restore ----------------------------------------------------------------------------------- + set +o xtrace restore-pvc.............................................................................................................Succeeded + compare_kubectl job.batch/restore-job-restore-pvc-sec-context + local resource=job.batch/restore-job-restore-pvc-sec-context + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context.yml + local new_result=/tmp/tmp.ohz7RI8nVC/job.batch_restore-job-restore-pvc-sec-context.yml + desc 'compare job.batch/restore-job-restore-pvc-sec-context-' + set +o xtrace ----------------------------------------------------------------------------------- compare job.batch/restore-job-restore-pvc-sec-context- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.27 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-k127-eks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml job.batch/restore-job-restore-pvc-sec-context ++ mktemp + local LAST_OUT=/tmp/tmp.RKhDmNnuLx ++ mktemp + local LAST_ERR=/tmp/tmp.BNad2zNUT4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml job.batch/restore-job-restore-pvc-sec-context + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RKhDmNnuLx + cat /tmp/tmp.BNad2zNUT4 + rm /tmp/tmp.RKhDmNnuLx /tmp/tmp.BNad2zNUT4 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-k127.yml /tmp/tmp.ohz7RI8nVC/job.batch_restore-job-restore-pvc-sec-context.yml + desc 'run s3 backup' + set +o xtrace ----------------------------------------------------------------------------------- run s3 backup ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.panQQYJFoY ++ mktemp + local LAST_ERR=/tmp/tmp.nzKOm1eBU9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.panQQYJFoY secret/minio-secret unchanged + cat /tmp/tmp.nzKOm1eBU9 + rm /tmp/tmp.panQQYJFoY /tmp/tmp.nzKOm1eBU9 + return 0 + start_minio + deploy_helm security-context-6372 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Fri Aug 2 15:20:28 2024 NAMESPACE: security-context-6372 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.security-context-6372.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace security-context-6372 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace security-context-6372 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace security-context-6372 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace security-context-6372 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aJJCioYZGY +++ mktemp ++ local LAST_ERR=/tmp/tmp.ae04Gvw3rw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aJJCioYZGY ++ cat /tmp/tmp.ae04Gvw3rw ++ rm /tmp/tmp.aJJCioYZGY /tmp/tmp.ae04Gvw3rw ++ return 0 + MINIO_POD=minio-service-6785948d49-fvzhb + wait_pod minio-service-6785948d49-fvzhb + local pod=minio-service-6785948d49-fvzhb + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo minio-service-6785948d49-fvzhb + local container= + set +o xtrace pod/minio-service-6785948d49-fvzhb condition met minio-service-6785948d49-fvzhb.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.Ny1Q79OzTh ++ mktemp + local LAST_ERR=/tmp/tmp.abJJsv6CMd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ny1Q79OzTh make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.abJJsv6CMd If you don't see a command prompt, try pressing enter. 
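Note: the trace above deploys an in-cluster MinIO via the minio/minio 5.0.14 chart (with securityContext.enabled=false) and bootstraps the operator-testing bucket from a throwaway aws-cli pod. As a side note, a minimal sketch of how that bucket could be checked by hand, reusing the image, credentials and endpoint copied from the log; the s3 ls re-run itself is not part of the test framework, only an illustration:

# Hypothetical manual check of the bucket created above; all values copied from the trace.
kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
  /usr/bin/env \
    AWS_ACCESS_KEY_ID=some-access-key \
    AWS_SECRET_ACCESS_KEY=some-secret-key \
    AWS_DEFAULT_REGION=us-east-1 \
  /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl \
    s3 ls s3://operator-testing

An empty listing (rather than an error) is enough to confirm that MinIO is reachable and the bucket exists before the backup steps that follow.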
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: error attaching to container: container is in CONTAINER_EXITED state + rm /tmp/tmp.Ny1Q79OzTh /tmp/tmp.abJJsv6CMd + return 0 + wait_cluster_consistency sec-context 3 2 + local cluster_name=sec-context + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aqGRVvcPOO +++ mktemp ++ local LAST_ERR=/tmp/tmp.xVH0jcO7xc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aqGRVvcPOO ++ cat /tmp/tmp.xVH0jcO7xc ++ rm /tmp/tmp.aqGRVvcPOO /tmp/tmp.xVH0jcO7xc ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kBPaWBNj1Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.pvHm5WM1km ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kBPaWBNj1Q ++ cat /tmp/tmp.pvHm5WM1km ++ rm /tmp/tmp.kBPaWBNj1Q /tmp/tmp.pvHm5WM1km ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine sec-context +++ local cluster_name=sec-context ++++ get_proxy sec-context ++++ local target_cluster=sec-context +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.jpf2Tjs9Q5 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.XXHfe2qHwN +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.jpf2Tjs9Q5 +++++ cat /tmp/tmp.XXHfe2qHwN +++++ rm /tmp/tmp.jpf2Tjs9Q5 /tmp/tmp.XXHfe2qHwN +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.e6sTPN0qVD ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.gsr4hhMLcI +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.e6sTPN0qVD +++++ cat /tmp/tmp.gsr4hhMLcI +++++ rm /tmp/tmp.e6sTPN0qVD /tmp/tmp.gsr4hhMLcI +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo sec-context-proxysql ++++ return +++ local cluster_proxy=sec-context-proxysql +++ echo proxysql ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zSb7phShNL +++ mktemp ++ local LAST_ERR=/tmp/tmp.7bjPgxqsZt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zSb7phShNL ++ cat /tmp/tmp.7bjPgxqsZt ++ rm /tmp/tmp.zSb7phShNL /tmp/tmp.7bjPgxqsZt ++ return 0 + [[ 2 == \2 ]] + backup=on-demand-backup-s3 + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-on-demand-backup-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.xsKAOlSbZO ++ mktemp + local LAST_ERR=/tmp/tmp.Yvt3mBSxDd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-on-demand-backup-s3.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xsKAOlSbZO perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-s3 created + cat /tmp/tmp.Yvt3mBSxDd + rm /tmp/tmp.xsKAOlSbZO /tmp/tmp.Yvt3mBSxDd + return 0 + wait_backup on-demand-backup-s3 + local backup=on-demand-backup-s3 + local status=Succeeded + set +o xtrace on-demand-backup-s3...............Succeeded + compare_kubectl job.batch/xb-on-demand-backup-s3 + local resource=job.batch/xb-on-demand-backup-s3 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3.yml + local new_result=/tmp/tmp.ohz7RI8nVC/job.batch_xb-on-demand-backup-s3.yml + desc 'compare job.batch/xb-on-demand-backup-s3-' + set +o xtrace ----------------------------------------------------------------------------------- compare job.batch/xb-on-demand-backup-s3- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired 
----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-eks.yml ']' + kubectl_bin get -o yaml job.batch/xb-on-demand-backup-s3 ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.RsnONWqovd ++ mktemp + local LAST_ERR=/tmp/tmp.SIYbZKLoxO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml job.batch/xb-on-demand-backup-s3 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RsnONWqovd + cat /tmp/tmp.SIYbZKLoxO + rm /tmp/tmp.RsnONWqovd /tmp/tmp.SIYbZKLoxO + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3.yml /tmp/tmp.ohz7RI8nVC/job.batch_xb-on-demand-backup-s3.yml + desc 'run s3 restore' + set +o xtrace ----------------------------------------------------------------------------------- run s3 restore ----------------------------------------------------------------------------------- + restore=restore-s3 + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/conf/sec-context-restore-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.o934n5Twrt ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-6372~ + local LAST_ERR=/tmp/tmp.ZCpl1Czzi6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.o934n5Twrt perconaxtradbclusterrestore.pxc.percona.com/restore-s3 created + cat /tmp/tmp.ZCpl1Czzi6 + rm /tmp/tmp.o934n5Twrt /tmp/tmp.ZCpl1Czzi6 + return 0 + wait_backup_restore restore-s3 + local backup_name=restore-s3 + local status=Succeeded + local wait_time=720 + desc 'wait backup restore' + set +o xtrace ----------------------------------------------------------------------------------- wait backup restore ----------------------------------------------------------------------------------- + set +o xtrace restore-s3.............................................................................................................................Succeeded + compare_kubectl job.batch/restore-job-restore-s3-sec-context + local resource=job.batch/restore-job-restore-s3-sec-context + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context.yml + 
local new_result=/tmp/tmp.ohz7RI8nVC/job.batch_restore-job-restore-s3-sec-context.yml + desc 'compare job.batch/restore-job-restore-s3-sec-context-' + set +o xtrace ----------------------------------------------------------------------------------- compare job.batch/restore-job-restore-s3-sec-context- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.27 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-k127-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-k127-eks.yml ']' + kubectl_bin get -o yaml job.batch/restore-job-restore-s3-sec-context ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("security-context-6372", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.gKRSRNJZBR ++ mktemp + local LAST_ERR=/tmp/tmp.qdfe3d4IzJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml job.batch/restore-job-restore-s3-sec-context + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gKRSRNJZBR + cat /tmp/tmp.qdfe3d4IzJ + rm /tmp/tmp.gKRSRNJZBR /tmp/tmp.qdfe3d4IzJ + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1774/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-k127.yml /tmp/tmp.ohz7RI8nVC/job.batch_restore-job-restore-s3-sec-context.yml + [[ -n '' ]] + destroy security-context-6372 + local namespace=security-context-6372 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + tee /tmp/tmp.ohz7RI8nVC/operator.log + grep -v 'get backup status: Job.batch' + grep -v level=info ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v 'the object has been modified' + sort -u ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.dXTWFr2jt5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.itfqEmtX3B ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dXTWFr2jt5 ++ cat /tmp/tmp.itfqEmtX3B ++ rm /tmp/tmp.dXTWFr2jt5 /tmp/tmp.itfqEmtX3B ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-56bc5d9fb9-fjbq7 ++ mktemp + local LAST_OUT=/tmp/tmp.ya0tFUGRKv ++ mktemp + local LAST_ERR=/tmp/tmp.w9HnzBh71C + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-56bc5d9fb9-fjbq7 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ya0tFUGRKv + cat /tmp/tmp.w9HnzBh71C + rm /tmp/tmp.ya0tFUGRKv /tmp/tmp.w9HnzBh71C + return 0 2024-08-02T14:56:09.193Z INFO setup Manager starting up {"gitCommit": "70b9684b9628ddfcc3dab7c6787cbf6d29753b3d", "gitBranch": "PR-1774-70b9684b", "buildTime": "2024-08-02T13:47:32Z", "goVersion": "go1.22.5", "os": "linux", "arch": "amd64"} 2024-08-02T14:56:09.193Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.27.16-gke.1008000"} 2024-08-02T14:56:09.194Z INFO setup Registering Components. 2024-08-02T14:56:12.762Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2024-08-02T14:56:12.805Z INFO controller-runtime.metrics Starting metrics server 2024-08-02T14:56:12.805Z INFO setup Starting the Cmd. 
2024-08-02T14:56:12.806Z INFO controller-runtime.certwatcher Starting certificate watcher 2024-08-02T14:56:12.806Z INFO controller-runtime.certwatcher Updated current TLS certificate 2024-08-02T14:56:12.806Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2024-08-02T14:56:12.806Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2024-08-02T14:56:12.806Z INFO controller-runtime.webhook Starting webhook server 2024-08-02T14:56:12.806Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2024-08-02T14:56:13.207Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2024-08-02T14:56:31.396Z DEBUG events percona-xtradb-cluster-operator-56bc5d9fb9-fjbq7_cc23c5ba-7870-43f7-82d6-57e98c6a982c became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"56cb2d0e-16e1-4d5e-984c-85bb3a892376","apiVersion":"coordination.k8s.io/v1","resourceVersion":"39539"}, "reason": "LeaderElection"} 2024-08-02T14:56:31.396Z INFO Starting Controller {"controller": "pxc-controller"} 2024-08-02T14:56:31.396Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2024-08-02T14:56:31.396Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2024-08-02T14:56:31.397Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2024-08-02T14:56:31.397Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2024-08-02T14:56:31.397Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2024-08-02T14:56:31.397Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2024-08-02T14:56:31.505Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2024-08-02T14:56:31.505Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2024-08-02T14:56:31.524Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2024-08-02T14:56:49.870Z INFO Set CR version {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "version": "1.15.0"} 2024-08-02T14:56:56.458Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-pxc"} 2024-08-02T14:56:56.631Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-proxysql"} 2024-08-02T14:56:56.678Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-pxc"} 2024-08-02T14:56:56.775Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-pxc-unready"} 2024-08-02T14:56:56.934Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-proxysql"} 2024-08-02T14:56:57.070Z DEBUG 
Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-proxysql"} 2024-08-02T14:56:57.111Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-proxysql"} 2024-08-02T14:56:57.218Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "512593cd-eb60-4f56-b850-79a759267390", "object": "sec-context-proxysql-unready"} 2024-08-02T14:56:58.350Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "f61370d3-ed24-4ebe-af34-94bc134db55a", "object": "sec-context-pxc"} 2024-08-02T14:58:14.627Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079", "user": "operator"} 2024-08-02T14:58:14.663Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079", "user": "monitor"} 2024-08-02T14:58:14.759Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079"} 2024-08-02T14:58:14.814Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079"} 2024-08-02T14:58:14.864Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079", "user": "xtrabackup"} 2024-08-02T14:58:14.920Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079"} 2024-08-02T14:58:14.958Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079", "user": "replication"} 2024-08-02T14:58:15.002Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0cf1c71b-7f04-4c98-a829-c93330e5d079", "err": "get primary pxc pod: not found"} 2024-08-02T14:58:19.607Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "530252d2-c3d5-485b-98c8-4f0479f22278", "err": "get primary pxc pod: not found"} 2024-08-02T14:58:24.755Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7322e82f-5391-424b-8798-9ff70c595b21", "err": "get primary pxc pod: not found"} 2024-08-02T14:58:29.895Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "61d27751-b966-43c5-96e4-b52e6b1c220d", "err": "get primary pxc pod: not found"} 2024-08-02T15:00:42.434Z INFO Password expiration policy updated {"controller": 
"pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "5a5cc9d1-c556-41ea-9ddd-5e7e4a32aee4", "user": "root"} 2024-08-02T15:00:42.692Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "5a5cc9d1-c556-41ea-9ddd-5e7e4a32aee4", "new version": "8.0.36-28.1"} 2024-08-02T15:00:45.013Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "5a5cc9d1-c556-41ea-9ddd-5e7e4a32aee4"} 2024-08-02T15:00:49.711Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "937efc7b-2268-44ae-a65c-6c81b1551432"} 2024-08-02T15:00:54.990Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "213fd058-cd5f-4463-8f83-9e72ce1d88a2"} 2024-08-02T15:01:00.660Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "75b875d5-144d-4ca9-8495-79e2371b816d"} 2024-08-02T15:01:05.988Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "5aa9f1d7-4109-4c70-9e74-f56240f6df53"} 2024-08-02T15:01:11.536Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "f1e79f34-548c-4fe8-b324-a50d2ddf7035"} 2024-08-02T15:01:16.502Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "ccf3456c-8989-4805-a89e-7adae8c1a3d0"} 2024-08-02T15:01:21.983Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7e7d07a5-bcb1-46f3-9923-82f3521bac7c"} 2024-08-02T15:01:27.484Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "d3139762-d224-4a03-875a-a1f768940f27"} 2024-08-02T15:01:32.507Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "6ce378bb-ba2b-4e45-8ed1-8aba22946adc"} 2024-08-02T15:01:37.996Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "8e6aa107-ad70-4e2b-bd6d-9f5be109ffa5"} 2024-08-02T15:01:43.176Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "83562d55-43b4-4bb9-997f-2ee934b8f8be"} 2024-08-02T15:01:48.600Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a7c8a2a5-6383-4351-aa66-971774e69beb"} 2024-08-02T15:01:54.029Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "e080f1d8-2621-4282-a77e-1391b5adf44c"} 2024-08-02T15:01:59.474Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": 
"fc58268f-734c-48f8-b406-8e9515cac762"} 2024-08-02T15:02:04.596Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "d0186a5d-ad10-4030-9d0e-e2edb01219cd"} 2024-08-02T15:02:05.002Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a9f182ae-9719-40c2-94d7-d891f7853224", "object": "sec-context-pxc"} 2024-08-02T15:02:05.179Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a9f182ae-9719-40c2-94d7-d891f7853224", "object": "sec-context-proxysql"} 2024-08-02T15:02:05.566Z INFO Creating or updating backup job {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a9f182ae-9719-40c2-94d7-d891f7853224", "name": "55cf2-each-hour-pvc", "schedule": "0 */1 * * *"} 2024-08-02T15:02:06.608Z ERROR sync users {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a9f182ae-9719-40c2-94d7-d891f7853224", "error": "exec syncusers: unable to upgrade connection: pod does not exist / / ", "errorVerbose": "exec syncusers: unable to upgrade connection: pod does not exist / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:922\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1240\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} 2024-08-02T15:03:05.590Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "38c3370c-2d07-429e-b6a9-f45c344cfc03", "err": "failed to ensure cluster readonly status: connect to pod sec-context-pxc-1: dial tcp 10.55.16.41:33062: connect: connection refused"} 2024-08-02T15:04:08.578Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "1d9c37c7-ca28-4f71-bb5a-a464baa33400", "err": "get primary pxc pod: failed to get proxy connection: dial tcp: lookup sec-context-proxysql-unready.security-context-6372: i/o timeout"} 2024-08-02T15:04:48.749Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7e34af58-ba31-42b0-83c2-c7610eebe944", "err": "failed to ensure cluster readonly status: connect to pod sec-context-pxc-2: dial tcp: lookup sec-context-pxc-2.sec-context-pxc.security-context-6372: i/o timeout"} 2024-08-02T15:05:38.378Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "e5398956-3ace-4c16-aa3d-7885497cabb1", "err": "failed to ensure cluster readonly status: connect to pod sec-context-pxc-2: dial tcp 10.55.18.9:33062: connect: connection refused"} 2024-08-02T15:05:38.781Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "792a86a6-f541-40c1-9f64-aeee4297e981", "err": "failed to ensure cluster readonly status: connect to 
pod sec-context-pxc-2: dial tcp 10.55.18.9:33062: connect: connection refused"} 2024-08-02T15:10:11.415Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "e9afa5d3-cb18-417c-af08-f64427e4a18f", "err": "failed to connect to pod sec-context-pxc-0: dial tcp 10.55.17.22:33062: connect: connection refused"} 2024-08-02T15:10:16.563Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "861ee67d-43b3-481a-b31c-9a0146ffbb8d", "primary name": "sec-context-pxc-0.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:10:21.670Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "e121a37a-ce2b-47e0-aa38-1b2fa0f032c9", "primary name": "sec-context-pxc-0.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:10:26.775Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7335d68d-b3e2-4e3a-9dc1-b14cdaf56d44", "primary name": "sec-context-pxc-0.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:10:31.884Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "1370b560-d48c-4a52-a67a-b29958a047f6", "primary name": "sec-context-pxc-0.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:10:37.012Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "926f0cf8-a851-4a07-ac4a-1f563536b4f6", "primary name": "sec-context-pxc-0.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:11:12.691Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "796b01da-6bec-4952-b365-6ef99932bade", "err": "failed to connect to pod sec-context-pxc-1: dial tcp 10.55.16.4:33062: i/o timeout"} 2024-08-02T15:11:39.407Z INFO Unable to find primary pod for replication. No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "b3af13fb-9991-49bc-847f-2287508ac2cb", "primary name": "sec-context-pxc-1.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:11:44.516Z INFO Unable to find primary pod for replication. 
No pod with name or ip like this {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "231a3b97-474c-4978-aed5-fc1f73509324", "primary name": "sec-context-pxc-1.sec-context-pxc.security-context-6372.svc.cluster.local"} 2024-08-02T15:12:31.707Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "87c4c585-ddfd-49cb-be31-f0e6ab8800b8", "err": "get primary pxc pod: failed to get proxy connection: dial tcp: lookup sec-context-proxysql-unready.security-context-6372: i/o timeout"} 2024-08-02T15:14:23.917Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "c2e3ee6e-29c2-47e0-9a61-956ced339c97"} 2024-08-02T15:14:29.168Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "2f5614d9-33ac-4f52-a87d-8ae93abf49b0"} 2024-08-02T15:14:34.096Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7bfb48fe-2629-491d-ad01-e04d102f4134"} 2024-08-02T15:14:39.388Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "85e81cf8-cd20-440d-88df-a0e659a36bae"} 2024-08-02T15:14:44.797Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a9d662b2-1758-496f-b58c-74da1740e670"} 2024-08-02T15:14:46.976Z INFO Creating a new volume for backup {"controller": "pxcbackup-controller", "namespace": "security-context-6372", "name": "on-demand-backup-pvc", "reconcileID": "41df69fd-5943-401d-a10c-3f6005314b3f", "Namespace": "security-context-6372", "Name": "xb-on-demand-backup-pvc"} 2024-08-02T15:14:47.074Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "security-context-6372", "name": "on-demand-backup-pvc", "reconcileID": "41df69fd-5943-401d-a10c-3f6005314b3f", "Namespace": "security-context-6372", "Name": "xb-on-demand-backup-pvc"} 2024-08-02T15:14:50.091Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "8bb64890-8e5a-4ff8-9c35-7370cb76b6e2"} 2024-08-02T15:14:55.544Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "4895c353-070f-4b4a-94e8-d9a76d9a7f35"} 2024-08-02T15:15:00.851Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "f0667071-4b14-4686-a503-604f932a783f"} 2024-08-02T15:15:06.228Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7ee31ecf-2898-4b21-9a98-1d1e253514f2"} 2024-08-02T15:15:11.732Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "479bd1c2-b496-46db-9332-45e6b7e8ba21"} 2024-08-02T15:15:16.858Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "dff99813-e87d-4376-8a24-a2559fca661a"} 
2024-08-02T15:15:22.240Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "759e9d16-685b-41ce-bb12-3d54bdeeb331"} 2024-08-02T15:15:27.796Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "d6354dd4-14df-470a-b26a-4a89e214fc3e"} 2024-08-02T15:15:32.924Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "2b8efc44-e4f8-44b2-99a9-7c409e8acfc6"} 2024-08-02T15:15:37.477Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-pvc", "reconcileID": "298f07c4-5bbc-4fca-bf07-794560cf3244"} 2024-08-02T15:15:38.301Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "1cb0d81d-8839-42d1-a3c9-b2620558ae9e"} 2024-08-02T15:15:43.736Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "6dca703a-1405-4b0a-9c1a-a17d407aa037"} 2024-08-02T15:15:49.125Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "31cf0cbc-1085-49dd-8ee9-4e99bd0788da"} 2024-08-02T15:15:51.553Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-pvc", "reconcileID": "298f07c4-5bbc-4fca-bf07-794560cf3244", "cluster": "sec-context"} 2024-08-02T15:15:51.694Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "701e26e4-7694-4645-ad21-dd7127def700"} 2024-08-02T15:15:51.696Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "701e26e4-7694-4645-ad21-dd7127def700", "object": "sec-context-pxc"} 2024-08-02T15:15:51.774Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "701e26e4-7694-4645-ad21-dd7127def700", "object": "sec-context-proxysql"} 2024-08-02T15:16:33.670Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-pvc", "reconcileID": "298f07c4-5bbc-4fca-bf07-794560cf3244", "cluster": "sec-context", "backup": "on-demand-backup-pvc"} 2024-08-02T15:17:00.187Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-pvc", "reconcileID": "298f07c4-5bbc-4fca-bf07-794560cf3244", "cluster": "sec-context"} 2024-08-02T15:17:00.302Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "d302664c-e7d4-462d-a313-a3c6a51df9cc", "object": "sec-context-pxc"} 2024-08-02T15:17:00.474Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "d302664c-e7d4-462d-a313-a3c6a51df9cc", "object": "sec-context-proxysql"} 2024-08-02T15:20:22.423Z INFO You can view xtrabackup log: 2024-08-02T15:20:24.402Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": 
"b136e204-f456-4826-abeb-ef86edbdb712"} 2024-08-02T15:20:29.340Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "05d72064-2bf0-4e52-8fc7-77d2670eb3f8"} 2024-08-02T15:20:34.625Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "1b35c494-8d69-45d4-9a53-74c54db1e57f"} 2024-08-02T15:20:41.014Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "916c3f31-61e5-47f5-b833-063c36167f74"} 2024-08-02T15:20:46.626Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "9c033ede-3555-49bf-a0ad-45917433ef5e"} 2024-08-02T15:20:52.201Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "3d6f39c1-5c85-432a-80b5-c162132a4679"} 2024-08-02T15:20:57.580Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "2574ef16-34f9-4451-aadb-9ac145b2ad4c"} 2024-08-02T15:21:02.955Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "1fb86452-e82a-4d8b-9d5d-01b0fb2f8add"} 2024-08-02T15:21:08.298Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "2ef34eea-e566-4ac1-bb26-6937791ba19c"} 2024-08-02T15:21:14.163Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "adef3f58-41c0-41bb-a297-7d54c8c265a1"} 2024-08-02T15:21:19.004Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "9884420c-46b2-426f-8cde-7d14edadf49b"} 2024-08-02T15:21:24.407Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "e78cc0ca-e5c0-47a2-ba48-d66ad12e1773"} 2024-08-02T15:21:30.733Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "61fba671-6980-4fb4-8f1a-e6222fe5ee86"} 2024-08-02T15:21:35.973Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "71228230-8916-4ef8-9257-057234adbc26"} 2024-08-02T15:21:41.318Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "578bf2b1-5ce3-4d57-967f-3305587b7f36"} 2024-08-02T15:21:46.636Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "66f3e72c-e24a-4437-bb87-f5cc37cebbe5"} 2024-08-02T15:21:52.312Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "93b68ebc-68e8-4c9d-8ee9-de5107535cf0"} 2024-08-02T15:21:55.633Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "security-context-6372", 
"name": "on-demand-backup-s3", "reconcileID": "12652dc7-6ce1-48c9-9f23-275832f85d4a", "Namespace": "security-context-6372", "Name": "xb-on-demand-backup-s3"} 2024-08-02T15:21:57.593Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "42099777-4cd0-4cff-b578-9209161b7ce0"} 2024-08-02T15:22:02.804Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "0766b4a9-172b-4b43-ba70-6cd2da9a581b"} 2024-08-02T15:22:07.973Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "328c5ea2-cd03-4eb0-9c6a-8850468129ee"} 2024-08-02T15:22:14.225Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a51bc6d9-1b61-4a2a-9f00-36fac9a28089"} 2024-08-02T15:22:18.856Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "a5d325e1-d43f-43bf-8b82-64087b9ce16d"} 2024-08-02T15:22:24.186Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "12a792c6-7bbc-4238-8d4e-8f5bbae3dc65"} 2024-08-02T15:22:30.320Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "ee4be6d5-807a-4da0-a7d7-0454756abb32"} 2024-08-02T15:22:30.794Z INFO backup restore request {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-s3", "reconcileID": "68433d3a-1389-4b1f-ba3c-f1f6d5346c8f"} 2024-08-02T15:22:30.834Z INFO stopping cluster {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-s3", "reconcileID": "68433d3a-1389-4b1f-ba3c-f1f6d5346c8f", "cluster": "sec-context"} 2024-08-02T15:22:30.958Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "3f8bdbe6-b53d-42a1-9ac6-ea57b833f2d9"} 2024-08-02T15:22:30.960Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "3f8bdbe6-b53d-42a1-9ac6-ea57b833f2d9", "object": "sec-context-pxc"} 2024-08-02T15:22:31.052Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "3f8bdbe6-b53d-42a1-9ac6-ea57b833f2d9", "object": "sec-context-proxysql"} 2024-08-02T15:23:15.000Z INFO starting restore {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-s3", "reconcileID": "68433d3a-1389-4b1f-ba3c-f1f6d5346c8f", "cluster": "sec-context", "backup": "on-demand-backup-s3"} 2024-08-02T15:23:37.076Z INFO starting cluster {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-s3", "reconcileID": "68433d3a-1389-4b1f-ba3c-f1f6d5346c8f", "cluster": "sec-context"} 2024-08-02T15:23:37.149Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "f0b59339-43fa-41d6-8930-c8fa19b633a3", "object": "sec-context-pxc"} 2024-08-02T15:23:37.203Z DEBUG Updating object {"controller": "pxc-controller", "namespace": 
"security-context-6372", "name": "sec-context", "reconcileID": "f0b59339-43fa-41d6-8930-c8fa19b633a3", "object": "sec-context-proxysql"} 2024-08-02T15:24:16.436Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "7bcb1135-e1ce-4bc1-a8eb-692e2cc52a85", "err": "get primary pxc pod: not found"} 2024-08-02T15:26:43.276Z INFO You can view xtrabackup log: 2024-08-02T15:26:45.520Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "security-context-6372", "name": "sec-context", "reconcileID": "964cb568-b3d4-4e3f-9cef-7c9c920b1e2a"} {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-pvc", "reconcileID": "298f07c4-5bbc-4fca-bf07-794560cf3244"} {"controller": "pxcrestore-controller", "namespace": "security-context-6372", "name": "restore-s3", "reconcileID": "68433d3a-1389-4b1f-ba3c-f1f6d5346c8f"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1242 If everything is fine, you can cleanup the job: $ kubectl delete pxc-restore/restore-pvc $ kubectl delete pxc-restore/restore-s3 $ kubectl logs job/restore-job-restore-pvc-sec-context $ kubectl logs job/restore-job-restore-s3-sec-context [mysql] 2024/08/02 15:10:11 connection.go:49: read tcp 10.55.17.21:41014->10.55.17.22:33062: read: connection reset by peer + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n security-context-6372 sec-context --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/sec-context patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.Fn8TjusZW8 ++ mktemp + local LAST_ERR=/tmp/tmp.FTsVlRMOAf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Fn8TjusZW8 perconaxtradbcluster.pxc.percona.com "sec-context" deleted + cat /tmp/tmp.FTsVlRMOAf + rm /tmp/tmp.Fn8TjusZW8 /tmp/tmp.FTsVlRMOAf + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.FZn1M1ulDH ++ mktemp + local LAST_ERR=/tmp/tmp.DEYLOf5MSd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FZn1M1ulDH perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-pvc" deleted perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-s3" deleted + cat /tmp/tmp.DEYLOf5MSd + rm /tmp/tmp.FZn1M1ulDH /tmp/tmp.DEYLOf5MSd + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.bwMYmMacLQ ++ mktemp + local LAST_ERR=/tmp/tmp.tPE69C3LX8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bwMYmMacLQ perconaxtradbclusterrestore.pxc.percona.com "restore-pvc" deleted perconaxtradbclusterrestore.pxc.percona.com "restore-s3" deleted + cat /tmp/tmp.tPE69C3LX8 + rm /tmp/tmp.bwMYmMacLQ /tmp/tmp.tPE69C3LX8 + return 0 + kubectl_bin delete ValidatingWebhookConfiguration 
percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.nxfIUYtRVj ++ mktemp + local LAST_ERR=/tmp/tmp.wQ98HXHY8l + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nxfIUYtRVj validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.wQ98HXHY8l + rm /tmp/tmp.nxfIUYtRVj /tmp/tmp.wQ98HXHY8l + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace security-context-6372 + rm -rf /tmp/tmp.ohz7RI8nVC + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Zg4BLuTruy + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.z8y8t4SIkJ ++ mktemp + local LAST_ERR=/tmp/tmp.mXDcMdSNTj + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.obRXQPyxGM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace security-context-6372