++ echo 'Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/security-context.log' Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/security-context.log ++ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: No Auth Provider found for name "gcp" +++ jq -r .serverVersion.gitVersion +++ kubectl version -o json +++ grep '\-eks\-' ++ '[' ']' ++ EKS=0 +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' +++ kubectl version -o json ++ KUBE_VERSION=1.20 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.8.2 ++ '[' v3 == v2 ']' + create_infra security-context-10689 + local ns=security-context-10689 + '[' -n pxc-operator ']' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get pxc --all-namespaces -o wide error: the server doesn't have a resource type "pxc" + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "pxc" + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.5Sk3nv7dWZ ++ mktemp + local LAST_ERR=/tmp/tmp.iQG4et56UY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.5Sk3nv7dWZ + cat /tmp/tmp.iQG4et56UY error: the server doesn't have a resource type "pxc" + rm /tmp/tmp.5Sk3nv7dWZ /tmp/tmp.iQG4et56UY + return 1 + : + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.kcxyg2J5uQ ++ mktemp + local LAST_ERR=/tmp/tmp.dD1PmQn6Oe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.kcxyg2J5uQ + cat /tmp/tmp.dD1PmQn6Oe error: the server doesn't have a resource type "pxc-backup" + rm /tmp/tmp.kcxyg2J5uQ /tmp/tmp.dD1PmQn6Oe + return 1 + : + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.82KXIb0IJv ++ mktemp + local LAST_ERR=/tmp/tmp.8BXiQwKTTH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.82KXIb0IJv + cat /tmp/tmp.8BXiQwKTTH error: the server doesn't have a resource type "pxc-restore" + rm /tmp/tmp.82KXIb0IJv /tmp/tmp.8BXiQwKTTH + return 1 + : + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + '[' '!' 
-z '' ']' + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.6UNyJxyay2 + xargs kubectl delete ns + kubectl_bin get ns + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.JF2LeyFWDQ ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.nVtMFU1Z6i + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.RGXObCLFNK + local exit_status=0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.JF2LeyFWDQ + cat /tmp/tmp.RGXObCLFNK + rm /tmp/tmp.JF2LeyFWDQ /tmp/tmp.RGXObCLFNK + return 0 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.6UNyJxyay2 + cat /tmp/tmp.nVtMFU1Z6i Error from server (NotFound): namespaces "pxc-operator" not found + rm /tmp/tmp.6UNyJxyay2 /tmp/tmp.nVtMFU1Z6i + return 1 + : + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + set +o xtrace namespace/pxc-operator - Error from server (NotFound): namespaces "pxc-operator" not found + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jAhKVTTJzr ++ mktemp + local LAST_ERR=/tmp/tmp.RtMHWTPhtz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.jAhKVTTJzr namespace/pxc-operator created + cat /tmp/tmp.RtMHWTPhtz + rm /tmp/tmp.jAhKVTTJzr /tmp/tmp.RtMHWTPhtz + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.qE5NrYyDDl +++ mktemp ++ local LAST_ERR=/tmp/tmp.KA45P5xfgC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qE5NrYyDDl ++ cat /tmp/tmp.KA45P5xfgC ++ rm /tmp/tmp.qE5NrYyDDl /tmp/tmp.KA45P5xfgC ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.taVdo8XwjZ ++ mktemp + local LAST_ERR=/tmp/tmp.YLvdrSrLnU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.taVdo8XwjZ Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling" modified. 
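For readability: the repeated mktemp / LAST_OUT / LAST_ERR / "seq 0 2" fragments above come from the suite's kubectl_bin helper, which retries each kubectl call up to three times and keeps its stdout and stderr in temp files for the caller. A minimal sketch reconstructed from this trace; the exact redirections and stderr handling are assumptions, not the repository's verbatim code:

# Sketch only: retry a kubectl command up to 3 times, keeping its output
# in temp files so the caller can print it afterwards (inferred from the trace).
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        if [[ $exit_status != 0 ]]; then
            sleep 0    # the trace shows a zero-second pause before the next attempt
            continue
        fi
        break
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

This is why "the server doesn't have a resource type \"pxc\"" appears three times per delete above: the CRDs are not installed yet, every attempt fails, and the helper returns 1, which the script then ignores with ": ".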
+ cat /tmp/tmp.YLvdrSrLnU + rm /tmp/tmp.taVdo8XwjZ /tmp/tmp.YLvdrSrLnU + return 0 + deploy_operator + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Z680B8iCjG ++ mktemp + local LAST_ERR=/tmp/tmp.hLkEJgvYn3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.Z680B8iCjG customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com created customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com created customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com created customresourcedefinition.apiextensions.k8s.io/perconaxtradbbackups.pxc.percona.com created + cat /tmp/tmp.hLkEJgvYn3 + rm /tmp/tmp.Z680B8iCjG /tmp/tmp.hLkEJgvYn3 + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.qqu8MPyjWl ++ mktemp + local LAST_ERR=/tmp/tmp.QH3VJLs7Ng + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.qqu8MPyjWl clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator created serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator created + cat /tmp/tmp.QH3VJLs7Ng + rm /tmp/tmp.qqu8MPyjWl /tmp/tmp.QH3VJLs7Ng + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + kubectl_bin apply -f - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a^' ++ mktemp + local LAST_OUT=/tmp/tmp.QQbTVqOb6O ++ mktemp + local LAST_ERR=/tmp/tmp.01gFkpI1Bw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.QQbTVqOb6O deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.01gFkpI1Bw + rm /tmp/tmp.QQbTVqOb6O /tmp/tmp.01gFkpI1Bw + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.NXnvB7QAlC +++ mktemp ++ local LAST_ERR=/tmp/tmp.c8pd9O2qgB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.NXnvB7QAlC ++ cat /tmp/tmp.c8pd9O2qgB ++ rm /tmp/tmp.NXnvB7QAlC /tmp/tmp.c8pd9O2qgB ++ return 0 + wait_pod 
percona-xtradb-cluster-operator-5699d7755d-n47nz 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5699d7755d-n47nz + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-5699d7755d-n47nz ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace percona-xtradb-cluster-operator-5699d7755d-n47nz.Ok + sleep 3 + create_namespace security-context-10689 + local namespace=security-context-10689 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + '[' '!' -z '' ']' + kubectl_bin delete namespace security-context-10689 + kubectl_bin get ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.8WqU62UnBq + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.O4GCDM6qPq ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.TSADcwaPMN + local LAST_ERR=/tmp/tmp.flT0AKqX8l + local exit_status=0 + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete namespace security-context-10689 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.8WqU62UnBq + cat /tmp/tmp.TSADcwaPMN + rm /tmp/tmp.8WqU62UnBq /tmp/tmp.TSADcwaPMN + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace security-context-10689 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace security-context-10689 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.O4GCDM6qPq + cat /tmp/tmp.flT0AKqX8l Error from server (NotFound): namespaces "security-context-10689" not found + rm /tmp/tmp.O4GCDM6qPq /tmp/tmp.flT0AKqX8l + return 1 + : + wait_for_delete namespace/security-context-10689 + local res=namespace/security-context-10689 + set +o xtrace namespace/security-context-10689 - Error from server (NotFound): namespaces "security-context-10689" not found + kubectl_bin create namespace security-context-10689 ++ mktemp + local LAST_OUT=/tmp/tmp.y2au4UrBHo ++ mktemp + local LAST_ERR=/tmp/tmp.UwBZ0eRGR7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace security-context-10689 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.y2au4UrBHo namespace/security-context-10689 created + cat /tmp/tmp.UwBZ0eRGR7 + rm /tmp/tmp.y2au4UrBHo /tmp/tmp.UwBZ0eRGR7 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.82FbdO3MB2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VVuiItzcoY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.82FbdO3MB2 ++ cat /tmp/tmp.VVuiItzcoY ++ rm /tmp/tmp.82FbdO3MB2 /tmp/tmp.VVuiItzcoY ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=security-context-10689 ++ mktemp + local LAST_OUT=/tmp/tmp.NVSTM7cTs7 ++ mktemp + local LAST_ERR=/tmp/tmp.x6u8LUZTXG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=security-context-10689 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.NVSTM7cTs7 Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling" modified. 
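The deploy_operator sequence traced above follows one pattern for every manifest: stream the bundled YAML through sed to pin the namespace, the PR-built image and the probe threshold, then pipe it into kubectl apply. Condensed from the trace; SRC is only shorthand for the workspace path shown in the log, and the target namespace comes from the kube context switched to pxc-operator a few commands earlier:

# Condensed from the trace: customize the cluster-wide manifests on the fly
# and apply them into the operator namespace.
SRC=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy

kubectl apply -f "$SRC/crd.yaml"

cat "$SRC/cw-rbac.yaml" \
    | sed -e 's^namespace: .*^namespace: pxc-operator^' \
    | kubectl apply -f -

cat "$SRC/cw-operator.yaml" \
    | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a^' \
    | sed -e 's^failureThreshold: .*^failureThreshold: 10^' \
    | kubectl apply -f -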
+ cat /tmp/tmp.x6u8LUZTXG + rm /tmp/tmp.NVSTM7cTs7 /tmp/tmp.x6u8LUZTXG + return 0 + apply_secrets + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.VpbSnKBqI0 ++ mktemp + local LAST_ERR=/tmp/tmp.mW40Pmy74k + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.VpbSnKBqI0 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.mW40Pmy74k + rm /tmp/tmp.VpbSnKBqI0 /tmp/tmp.mW40Pmy74k + return 0 + deploy_cert_manager + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.MvM59UYZBi ++ mktemp + local LAST_ERR=/tmp/tmp.fngNmXLufs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace cert-manager + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MvM59UYZBi namespace/cert-manager created + cat /tmp/tmp.fngNmXLufs + rm /tmp/tmp.MvM59UYZBi /tmp/tmp.fngNmXLufs + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.pYJQ2qRWHy ++ mktemp + local LAST_ERR=/tmp/tmp.pfKNKi5udS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.pYJQ2qRWHy namespace/cert-manager labeled + cat /tmp/tmp.pfKNKi5udS + rm /tmp/tmp.pYJQ2qRWHy /tmp/tmp.pfKNKi5udS + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.TcexCDZpKd ++ mktemp + local LAST_ERR=/tmp/tmp.mVgTkwlnvR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml --validate=false + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.TcexCDZpKd customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created namespace/cert-manager configured serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created 
clusterrole.rbac.authorization.k8s.io/cert-manager-view created clusterrole.rbac.authorization.k8s.io/cert-manager-edit created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created role.rbac.authorization.k8s.io/cert-manager:leaderelection created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created + cat /tmp/tmp.mVgTkwlnvR Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
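The cert-manager bootstrap reduces to the commands below; the warning about the missing last-applied-configuration annotation is expected, because the cert-manager namespace was first created imperatively and is then re-applied from the upstream manifest. Condensed from the trace:

# Condensed from the trace: install cert-manager v1.5.4 before creating the cluster.
kubectl create namespace cert-manager
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml --validate=false
sleep 60    # fixed delay used by the test to let the webhooks come up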
+ rm /tmp/tmp.TcexCDZpKd /tmp/tmp.mVgTkwlnvR + return 0 + sleep 60 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/service-account.yml ++ mktemp + local LAST_OUT=/tmp/tmp.q03Lxr6oQL ++ mktemp + local LAST_ERR=/tmp/tmp.hhPXKPEUWn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/service-account.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.q03Lxr6oQL serviceaccount/percona-xtradb-cluster-operator-workload created + cat /tmp/tmp.hhPXKPEUWn + rm /tmp/tmp.q03Lxr6oQL /tmp/tmp.hhPXKPEUWn + return 0 + [[ -n '' ]] + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + cluster=sec-context + spinup_pxc sec-context /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context.yml 3 10 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + local cluster=sec-context + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.xZwHFPNFfv ++ mktemp + local LAST_ERR=/tmp/tmp.vBV0ekCYZo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.xZwHFPNFfv secret/my-cluster-secrets created + cat /tmp/tmp.vBV0ekCYZo + rm /tmp/tmp.xZwHFPNFfv /tmp/tmp.vBV0ekCYZo + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + local LAST_OUT=/tmp/tmp.aqBGPQlcSA + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-10689~ + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + 
/usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_ERR=/tmp/tmp.5krRGzo6Jq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.aqBGPQlcSA deployment.apps/pxc-client created + cat /tmp/tmp.5krRGzo6Jq + rm /tmp/tmp.aqBGPQlcSA /tmp/tmp.5krRGzo6Jq + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context.yml + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-10689~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + local LAST_OUT=/tmp/tmp.Ns6VWP1nKZ ++ mktemp + local LAST_ERR=/tmp/tmp.agYgCUeteZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.Ns6VWP1nKZ perconaxtradbcluster.pxc.percona.com/sec-context created + cat /tmp/tmp.agYgCUeteZ + rm /tmp/tmp.Ns6VWP1nKZ /tmp/tmp.agYgCUeteZ + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy sec-context ++ local target_cluster=sec-context +++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CKYpM6HZaS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FH0QTlLOdJ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.CKYpM6HZaS +++ cat /tmp/tmp.FH0QTlLOdJ +++ rm /tmp/tmp.CKYpM6HZaS /tmp/tmp.FH0QTlLOdJ +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5x8YAql06R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bFKtPOud8P +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc sec-context -o 
'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.5x8YAql06R +++ cat /tmp/tmp.bFKtPOud8P +++ rm /tmp/tmp.5x8YAql06R /tmp/tmp.bFKtPOud8P +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo sec-context-proxysql ++ return + local proxy=sec-context-proxysql + wait_for_running sec-context-proxysql 1 + local name=sec-context-proxysql + let last_pod=0 + : + local max_retry=480 ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-proxysql-0 480 + local pod=sec-context-proxysql-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo sec-context-proxysql-0 + local container=proxysql + set +o xtrace sec-context-proxysql-0.............Ok + wait_for_running sec-context-pxc 3 + local name=sec-context-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-pxc-0 480 + local pod=sec-context-pxc-0 + local max_retry=480 + local ns= ++ echo sec-context-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace sec-context-pxc-0......................Ok + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-pxc-1 480 + local pod=sec-context-pxc-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo sec-context-pxc-1 + local container=pxc + set +o xtrace sec-context-pxc-1...................................Ok + for i in '$(seq 0 $last_pod)' + wait_pod sec-context-pxc-2 480 + local pod=sec-context-pxc-2 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo sec-context-pxc-2 + local container=pxc + set +o xtrace sec-context-pxc-2...........................................Ok + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h sec-context-proxysql -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h sec-context-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qIYUAQ72zt +++ mktemp ++ local LAST_ERR=/tmp/tmp.FIkqBLybyB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qIYUAQ72zt ++ cat /tmp/tmp.FIkqBLybyB ++ rm /tmp/tmp.qIYUAQ72zt /tmp/tmp.FIkqBLybyB ++ return 0 + client_pod=pxc-client-5d749ff8b6-g5rnq + wait_pod pxc-client-5d749ff8b6-g5rnq + local pod=pxc-client-5d749ff8b6-g5rnq + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-g5rnq ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-g5rnq.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h sec-context-proxysql -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h sec-context-proxysql 
-uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E3KsI2Jb5H +++ mktemp ++ local LAST_ERR=/tmp/tmp.P229WOTAkO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.E3KsI2Jb5H ++ cat /tmp/tmp.P229WOTAkO ++ rm /tmp/tmp.E3KsI2Jb5H /tmp/tmp.P229WOTAkO ++ return 0 + client_pod=pxc-client-5d749ff8b6-g5rnq + wait_pod pxc-client-5d749ff8b6-g5rnq + local pod=pxc-client-5d749ff8b6-g5rnq + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-g5rnq ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-g5rnq.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-0.sec-context-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SDFFFRECIl +++ mktemp ++ local LAST_ERR=/tmp/tmp.8nR7ixonZj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.SDFFFRECIl ++ cat /tmp/tmp.8nR7ixonZj ++ rm /tmp/tmp.SDFFFRECIl /tmp/tmp.8nR7ixonZj ++ return 0 + client_pod=pxc-client-5d749ff8b6-g5rnq + wait_pod pxc-client-5d749ff8b6-g5rnq + local pod=pxc-client-5d749ff8b6-g5rnq + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-g5rnq + local container= + set +o xtrace pxc-client-5d749ff8b6-g5rnq.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.woNsh8iaMb/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.woNsh8iaMb/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-1.sec-context-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K9qUhzlt3k +++ mktemp ++ local LAST_ERR=/tmp/tmp.R2jGtZHcfs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.K9qUhzlt3k ++ cat /tmp/tmp.R2jGtZHcfs ++ rm /tmp/tmp.K9qUhzlt3k /tmp/tmp.R2jGtZHcfs ++ return 0 + client_pod=pxc-client-5d749ff8b6-g5rnq + wait_pod pxc-client-5d749ff8b6-g5rnq + local pod=pxc-client-5d749ff8b6-g5rnq + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-g5rnq ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-5d749ff8b6-g5rnq.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.woNsh8iaMb/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.woNsh8iaMb/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h sec-context-pxc-2.sec-context-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aa9i8qLucC +++ mktemp ++ local LAST_ERR=/tmp/tmp.ggmXGmzPlT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.aa9i8qLucC ++ cat /tmp/tmp.ggmXGmzPlT ++ rm /tmp/tmp.aa9i8qLucC /tmp/tmp.ggmXGmzPlT ++ return 0 + client_pod=pxc-client-5d749ff8b6-g5rnq + wait_pod pxc-client-5d749ff8b6-g5rnq + local pod=pxc-client-5d749ff8b6-g5rnq + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-g5rnq + local container= + set +o xtrace pxc-client-5d749ff8b6-g5rnq.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.woNsh8iaMb/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/select-1.sql /tmp/tmp.woNsh8iaMb/select-1.sql ++ is_keyring_plugin_in_use sec-context ++ local cluster=sec-context ++ kubectl_bin exec -it sec-context-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bt6zlyXNy2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xv8V96vKCs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec -it sec-context-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.bt6zlyXNy2 ++ cat /tmp/tmp.xv8V96vKCs Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.bt6zlyXNy2 /tmp/tmp.xv8V96vKCs ++ return 0 + '[' '' ']' + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/sec-context-pxc + local resource=statefulset/sec-context-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc.yml + local new_result=/tmp/tmp.woNsh8iaMb/statefulset_sec-context-pxc.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/sec-context-pxc + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.nodePort' + yq d - '**.procMount' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.storageClassName' + yq d - '**.creationTimestamp' + yq d - '**.finalizers' + yq d - spec.nodeName + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."percona.com/*"' + yq d - '**.volumeName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**.imagePullSecrets' + yq d - '**.healthCheckNodePort' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - status + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.enableServiceLinks' + yq d - '**.controller-uid' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + yq d - '**.preemptionPolicy' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/security-context-10689/namespace/g + yq d - '**.creationTimestamp' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.VTqWldSfg2 ++ mktemp + local LAST_ERR=/tmp/tmp.Ukx0cJMFzD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/sec-context-pxc + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.VTqWldSfg2 + cat /tmp/tmp.Ukx0cJMFzD + rm /tmp/tmp.VTqWldSfg2 /tmp/tmp.Ukx0cJMFzD + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc.yml /tmp/tmp.woNsh8iaMb/statefulset_sec-context-pxc.yml + compare_kubectl statefulset/sec-context-proxysql + local resource=statefulset/sec-context-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql.yml + local new_result=/tmp/tmp.woNsh8iaMb/statefulset_sec-context-proxysql.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + yq d - '**.uid' + yq d - metadata.resourceVersion + kubectl_bin get -o yaml statefulset/sec-context-proxysql + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.procMount' + yq d - '**.finalizers' + yq d - spec.volumeMode + yq d - '**.storageClassName' + yq d - spec.nodeName + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**."volume.kubernetes.io/selected-node"' ++ mktemp + yq d - '**."percona.com/*"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.healthCheckNodePort' + yq d - '**.nodePort' + yq d - '**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - '**.clusterIPs' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.dataSource' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.clusterIP' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.namespace' + local LAST_OUT=/tmp/tmp.RwxcgpDBBQ + yq d - 'spec.volumeClaimTemplates.*.kind' ++ mktemp + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.creationTimestamp' + local LAST_ERR=/tmp/tmp.SAZYUZoSCw + local exit_status=0 + yq d - '**.controller-uid' + yq d - metadata.managedFields + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/sec-context-proxysql + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/security-context-10689/namespace/g + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.RwxcgpDBBQ + cat /tmp/tmp.SAZYUZoSCw + rm /tmp/tmp.RwxcgpDBBQ /tmp/tmp.SAZYUZoSCw + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql.yml /tmp/tmp.woNsh8iaMb/statefulset_sec-context-proxysql.yml + desc 'change security context in PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- change security context in PXC cluster ----------------------------------------------------------------------------------- + pfx=-changes + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-changes.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-changes.yml ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.security-context-10689~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_OUT=/tmp/tmp.3CnfTqubxL + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.mo8ITfARKG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-changes.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.3CnfTqubxL perconaxtradbcluster.pxc.percona.com/sec-context configured + cat /tmp/tmp.mo8ITfARKG + rm /tmp/tmp.3CnfTqubxL /tmp/tmp.mo8ITfARKG + return 0 + sleep 30 + desc 'check if service and statefulset chenged to expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset chenged to expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/sec-context-pxc -changes + local resource=statefulset/sec-context-pxc + local postfix=-changes + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes.yml + local new_result=/tmp/tmp.woNsh8iaMb/statefulset_sec-context-pxc.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/sec-context-pxc + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.procMount' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.storageClassName' + yq d - '**."percona.com/*"' + yq d - '**.finalizers' + yq d - '**.creationTimestamp' + yq d - status + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - spec.nodeName + yq d - '**.volumeName' + yq d - '**.healthCheckNodePort' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.imagePullSecrets' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**.nodePort' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.(name==suffix)' + yq d - '**.enableServiceLinks' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - '**.creationTimestamp' + /usr/bin/sed s/security-context-10689/namespace/g + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.I6dmnunpi0 ++ mktemp + local LAST_ERR=/tmp/tmp.eKWia3uSYi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/sec-context-pxc + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.I6dmnunpi0 + cat /tmp/tmp.eKWia3uSYi + rm /tmp/tmp.I6dmnunpi0 /tmp/tmp.eKWia3uSYi + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-pxc-changes.yml /tmp/tmp.woNsh8iaMb/statefulset_sec-context-pxc.yml + compare_kubectl statefulset/sec-context-proxysql -changes + local resource=statefulset/sec-context-proxysql + local postfix=-changes + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes.yml + local new_result=/tmp/tmp.woNsh8iaMb/statefulset_sec-context-proxysql.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ bc -l ++ echo '1.20 >= 1.21' + '[' 0 -eq 1 ']' + return 1 + yq d - '**."kubernetes.io/pvc-protection"' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."percona.com/*"' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.clusterIP' + yq d - '**.healthCheckNodePort' + yq d - '**.nodePort' + yq d - '**.finalizers' + yq d - '**.storageClassName' + yq d - '**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - '**.clusterIPs' + yq d - status + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.uid' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.namespace' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + yq d - '**.preemptionPolicy' + yq d - metadata.selfLink + yq d - '**.controller-uid' + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.creationTimestamp' + yq d - metadata.managedFields + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/security-context-10689/namespace/g + kubectl_bin get -o yaml statefulset/sec-context-proxysql ++ mktemp + local LAST_OUT=/tmp/tmp.j6V1mPymi0 ++ mktemp + local LAST_ERR=/tmp/tmp.GOo7TqGj2z + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/sec-context-proxysql + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.j6V1mPymi0 + cat /tmp/tmp.GOo7TqGj2z + rm /tmp/tmp.j6V1mPymi0 /tmp/tmp.GOo7TqGj2z + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/statefulset_sec-context-proxysql-changes.yml /tmp/tmp.woNsh8iaMb/statefulset_sec-context-proxysql.yml + wait_cluster_consistency sec-context 3 2 + local cluster_name=sec-context + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + sleep 7 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C0CZMHtxDs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RVpZ6ltcZr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 
0 ]] ++ break ++ cat /tmp/tmp.C0CZMHtxDs ++ cat /tmp/tmp.RVpZ6ltcZr ++ rm /tmp/tmp.C0CZMHtxDs /tmp/tmp.RVpZ6ltcZr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bw8J3PHOqa +++ mktemp ++ local LAST_ERR=/tmp/tmp.LF38YmPrhF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.bw8J3PHOqa ++ cat /tmp/tmp.LF38YmPrhF ++ rm /tmp/tmp.bw8J3PHOqa /tmp/tmp.LF38YmPrhF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VckERNMk0B +++ mktemp ++ local LAST_ERR=/tmp/tmp.AkMAI61cu4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.VckERNMk0B ++ cat /tmp/tmp.AkMAI61cu4 ++ rm /tmp/tmp.VckERNMk0B /tmp/tmp.AkMAI61cu4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.clbqOQ0qWi +++ mktemp ++ local LAST_ERR=/tmp/tmp.kgVT9ShRwM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.clbqOQ0qWi ++ cat /tmp/tmp.kgVT9ShRwM ++ rm /tmp/tmp.clbqOQ0qWi /tmp/tmp.kgVT9ShRwM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mKCFzpkx2k +++ mktemp ++ local LAST_ERR=/tmp/tmp.sDbd1G8gf4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.mKCFzpkx2k ++ cat /tmp/tmp.sDbd1G8gf4 ++ rm /tmp/tmp.mKCFzpkx2k /tmp/tmp.sDbd1G8gf4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rgsdqlaVsg +++ mktemp ++ local LAST_ERR=/tmp/tmp.bAhM73hPGG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.rgsdqlaVsg ++ cat /tmp/tmp.bAhM73hPGG ++ rm /tmp/tmp.rgsdqlaVsg /tmp/tmp.bAhM73hPGG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.McrKDhdrSR +++ mktemp ++ local LAST_ERR=/tmp/tmp.JA4C0TReLS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.McrKDhdrSR ++ cat /tmp/tmp.JA4C0TReLS ++ rm /tmp/tmp.McrKDhdrSR /tmp/tmp.JA4C0TReLS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + 
sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lTguLgMEmq +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tt6JAZ2JJV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.lTguLgMEmq ++ cat /tmp/tmp.Tt6JAZ2JJV ++ rm /tmp/tmp.lTguLgMEmq /tmp/tmp.Tt6JAZ2JJV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tHEV5EmLyJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.x61NQPdsIT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.tHEV5EmLyJ ++ cat /tmp/tmp.x61NQPdsIT ++ rm /tmp/tmp.tHEV5EmLyJ /tmp/tmp.x61NQPdsIT ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SLfLCFlEPE +++ mktemp ++ local LAST_ERR=/tmp/tmp.tNekMYxfoC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.SLfLCFlEPE ++ cat /tmp/tmp.tNekMYxfoC ++ rm /tmp/tmp.SLfLCFlEPE /tmp/tmp.tNekMYxfoC ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine sec-context +++ local cluster_name=sec-context ++++ get_proxy sec-context ++++ local target_cluster=sec-context +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.5UrSfYYDyA ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ivqy3mDHoS +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.5UrSfYYDyA +++++ cat /tmp/tmp.ivqy3mDHoS +++++ rm /tmp/tmp.5UrSfYYDyA /tmp/tmp.ivqy3mDHoS +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.jDxumcmGXf ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.TU8ZgQjUcB +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.jDxumcmGXf +++++ cat /tmp/tmp.TU8ZgQjUcB +++++ rm /tmp/tmp.jDxumcmGXf /tmp/tmp.TU8ZgQjUcB +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo sec-context-proxysql ++++ return +++ local cluster_proxy=sec-context-proxysql +++ echo proxysql ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r5Hr5B9kQM +++ mktemp ++ local LAST_ERR=/tmp/tmp.h9OmeIfBsS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.r5Hr5B9kQM ++ cat /tmp/tmp.h9OmeIfBsS ++ rm /tmp/tmp.r5Hr5B9kQM /tmp/tmp.h9OmeIfBsS ++ return 0 + [[ 2 == \2 ]] + desc 'run pvc backup' + set +o xtrace ----------------------------------------------------------------------------------- run pvc backup ----------------------------------------------------------------------------------- + backup=on-demand-backup-pvc + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-on-demand-backup-pvc.yml ++ mktemp + local LAST_OUT=/tmp/tmp.QsOQbOUwHV ++ mktemp + local LAST_ERR=/tmp/tmp.8ciIOw7kRJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-on-demand-backup-pvc.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.QsOQbOUwHV perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-pvc created + cat /tmp/tmp.8ciIOw7kRJ + rm /tmp/tmp.QsOQbOUwHV /tmp/tmp.8ciIOw7kRJ + return 0 + wait_backup on-demand-backup-pvc + local backup=on-demand-backup-pvc + set +o xtrace on-demand-backup-pvc...................Succeeded + compare_kubectl job.batch/xb-on-demand-backup-pvc + local resource=job.batch/xb-on-demand-backup-pvc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc.yml + local new_result=/tmp/tmp.woNsh8iaMb/job.batch_xb-on-demand-backup-pvc.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml job.batch/xb-on-demand-backup-pvc + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."percona.com/*"' + yq d - '**.finalizers' + yq d - '**.healthCheckNodePort' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.nodePort' + yq d - '**.creationTimestamp' + yq d - '**.imagePullSecrets' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.enableServiceLinks' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==suffix)' + yq d - status + yq d - spec.nodeName + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - 
'**.creationTimestamp' + /usr/bin/sed s/security-context-10689/namespace/g + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.SxW4t1nxNX ++ mktemp + local LAST_ERR=/tmp/tmp.LrKOUPZ8yV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml job.batch/xb-on-demand-backup-pvc + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.SxW4t1nxNX + cat /tmp/tmp.LrKOUPZ8yV + rm /tmp/tmp.SxW4t1nxNX /tmp/tmp.LrKOUPZ8yV + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-pvc.yml /tmp/tmp.woNsh8iaMb/job.batch_xb-on-demand-backup-pvc.yml ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3wNJ8RaAvP +++ mktemp ++ local LAST_ERR=/tmp/tmp.LspWY8vHPd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.3wNJ8RaAvP ++ cat /tmp/tmp.LspWY8vHPd ++ rm /tmp/tmp.3wNJ8RaAvP /tmp/tmp.LspWY8vHPd ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=security-context-10689 ++ mktemp + local LAST_OUT=/tmp/tmp.VfNn4z5L8Y ++ mktemp + local LAST_ERR=/tmp/tmp.Or6Uj3wqw1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling --namespace=security-context-10689 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.VfNn4z5L8Y Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-scaling" modified. + cat /tmp/tmp.Or6Uj3wqw1 + rm /tmp/tmp.VfNn4z5L8Y /tmp/tmp.Or6Uj3wqw1 + return 0 + desc 'run pvc restore' + set +o xtrace ----------------------------------------------------------------------------------- run pvc restore ----------------------------------------------------------------------------------- + restore=restore-pvc + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-restore-pvc.yml ++ mktemp + local LAST_OUT=/tmp/tmp.AVUzDocrg0 ++ mktemp + local LAST_ERR=/tmp/tmp.IMRTGWvJEB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-restore-pvc.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.AVUzDocrg0 perconaxtradbclusterrestore.pxc.percona.com/restore-pvc created + cat /tmp/tmp.IMRTGWvJEB + rm /tmp/tmp.AVUzDocrg0 /tmp/tmp.IMRTGWvJEB + return 0 + wait_pod restore-src-restore-pvc-sec-context + local pod=restore-src-restore-pvc-sec-context + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo restore-src-restore-pvc-sec-context + local container= + set +o xtrace restore-src-restore-pvc-sec-context..........................Ok + kubectl_bin get -o yaml pod/restore-src-restore-pvc-sec-context ++ mktemp + local LAST_OUT=/tmp/tmp.Eu5Uink01D ++ mktemp + local LAST_ERR=/tmp/tmp.9eamGqP7pI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pod/restore-src-restore-pvc-sec-context + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.Eu5Uink01D apiVersion: v1 kind: Pod metadata: annotations: openshift.io/scc: privileged creationTimestamp: "2022-04-14T12:31:39Z" labels: name: restore-src-restore-pvc-sec-context managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:annotations: .: {} f:openshift.io/scc: {} f:labels: .: {} 
f:name: {} f:ownerReferences: .: {} k:{"uid":"cf437767-9cca-4354-b611-a736d75b8794"}: .: {} f:apiVersion: {} f:controller: {} f:kind: {} f:name: {} f:uid: {} f:spec: f:containers: k:{"name":"ncat"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: .: {} f:privileged: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/backup"}: .: {} f:mountPath: {} f:name: {} k:{"mountPath":"/etc/mysql/ssl"}: .: {} f:mountPath: {} f:name: {} k:{"mountPath":"/etc/mysql/ssl-internal"}: .: {} f:mountPath: {} f:name: {} k:{"mountPath":"/etc/mysql/vault-keyring-secret"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: .: {} f:fsGroup: {} f:supplementalGroups: {} f:serviceAccount: {} f:serviceAccountName: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"backup"}: .: {} f:name: {} f:persistentVolumeClaim: .: {} f:claimName: {} k:{"name":"ssl"}: .: {} f:name: {} f:secret: .: {} f:defaultMode: {} f:optional: {} f:secretName: {} k:{"name":"ssl-internal"}: .: {} f:name: {} f:secret: .: {} f:defaultMode: {} f:optional: {} f:secretName: {} k:{"name":"vault-keyring-secret"}: .: {} f:name: {} f:secret: .: {} f:defaultMode: {} f:optional: {} f:secretName: {} manager: percona-xtradb-cluster-operator operation: Update time: "2022-04-14T12:31:39Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.120.0.12"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update time: "2022-04-14T12:31:53Z" name: restore-src-restore-pvc-sec-context namespace: security-context-10689 ownerReferences: - apiVersion: pxc.percona.com/v1 controller: true kind: PerconaXtraDBClusterRestore name: restore-pvc uid: cf437767-9cca-4354-b611-a736d75b8794 resourceVersion: "8177" uid: d85431e4-15aa-49b3-b40f-a24463162855 spec: containers: - command: - recovery-pvc-donor.sh image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup imagePullPolicy: Always name: ncat resources: {} securityContext: privileged: true terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /backup name: backup - mountPath: /etc/mysql/ssl name: ssl - mountPath: /etc/mysql/ssl-internal name: ssl-internal - mountPath: /etc/mysql/vault-keyring-secret name: vault-keyring-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: percona-xtradb-cluster-operator-workload-token-r8mvw readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: gke-jenkins-pxc-706f792a-default-pool-c2a87783-2nl6 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1001 supplementalGroups: - 1001 - 1002 - 1003 serviceAccount: percona-xtradb-cluster-operator-workload serviceAccountName: percona-xtradb-cluster-operator-workload terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: 
Exists tolerationSeconds: 300 volumes: - name: backup persistentVolumeClaim: claimName: xb-on-demand-backup-pvc - name: ssl-internal secret: defaultMode: 420 optional: true secretName: some-name-ssl-internal - name: ssl secret: defaultMode: 420 optional: false secretName: some-name-ssl - name: vault-keyring-secret secret: defaultMode: 420 optional: true secretName: sec-context-vault - name: percona-xtradb-cluster-operator-workload-token-r8mvw secret: defaultMode: 420 secretName: percona-xtradb-cluster-operator-workload-token-r8mvw status: conditions: - lastProbeTime: null lastTransitionTime: "2022-04-14T12:31:39Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2022-04-14T12:31:53Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2022-04-14T12:31:53Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2022-04-14T12:31:39Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://480da5e271bbef7d5aa4558b2ccfaeaadca2495189f3c4ac76852d3684a917b0 image: docker.io/perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup imageID: docker.io/perconalab/percona-xtradb-cluster-operator@sha256:99699ba9cf01cc62e6c92bb52c2e58652f156455d315996f1c467118517e96d8 lastState: {} name: ncat ready: true restartCount: 0 started: true state: running: startedAt: "2022-04-14T12:31:52Z" hostIP: 10.172.0.77 phase: Running podIP: 10.120.0.12 podIPs: - ip: 10.120.0.12 qosClass: BestEffort startTime: "2022-04-14T12:31:39Z" + cat /tmp/tmp.9eamGqP7pI + rm /tmp/tmp.Eu5Uink01D /tmp/tmp.9eamGqP7pI + return 0 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + compare_kubectl pod/restore-src-restore-pvc-sec-context -120 + local resource=pod/restore-src-restore-pvc-sec-context + local postfix=-120 + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-120.yml + local new_result=/tmp/tmp.woNsh8iaMb/pod_restore-src-restore-pvc-sec-context.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-120-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-120-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-120-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml pod/restore-src-restore-pvc-sec-context + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.finalizers' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**."percona.com/*"' + yq d - '**.volumeName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.healthCheckNodePort' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.nodePort' + yq d - '**.dataSource' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.imagePullSecrets' + yq d - '**.creationTimestamp' + yq d - '**.enableServiceLinks' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - status + yq d - '**.storageClassName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==suffix)' + yq d - '**.procMount' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.controller-uid' + yq d - spec.ipFamilies + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/security-context-10689/namespace/g + yq d - '**.creationTimestamp' ++ mktemp + local LAST_OUT=/tmp/tmp.uvuupVzksI + yq d - metadata.managedFields ++ mktemp + local LAST_ERR=/tmp/tmp.WarFNaoehH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml pod/restore-src-restore-pvc-sec-context + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.uvuupVzksI + cat /tmp/tmp.WarFNaoehH + rm /tmp/tmp.uvuupVzksI /tmp/tmp.WarFNaoehH + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/pod_restore-src-restore-pvc-sec-context-120.yml /tmp/tmp.woNsh8iaMb/pod_restore-src-restore-pvc-sec-context.yml + wait_backup_restore restore-pvc + local backup_name=restore-pvc + set +o xtrace restore-pvc....................................................................................................................Succeeded + compare_kubectl job.batch/restore-job-restore-pvc-sec-context + local resource=job.batch/restore-job-restore-pvc-sec-context + local postfix= + local 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context.yml + local new_result=/tmp/tmp.woNsh8iaMb/job.batch_restore-job-restore-pvc-sec-context.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml job.batch/restore-job-restore-pvc-sec-context + yq d - '**.creationTimestamp' ++ mktemp + local LAST_OUT=/tmp/tmp.0y9u7eAa6E + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' ++ mktemp + local LAST_ERR=/tmp/tmp.ZIav80WUBZ + local exit_status=0 + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - spec.nodeName + yq d - '**."percona.com/*"' + yq d - '**.namespace' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.nodePort' + yq d - spec.volumeMode + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.procMount' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml job.batch/restore-job-restore-pvc-sec-context + yq d - '**.storageClassName' + yq d - '**.finalizers' + yq d - status + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.(name==NAMESPACE)' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - '**.volumeName' + yq d - metadata.selfLink + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.enableServiceLinks' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==suffix)' + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - metadata.resourceVersion + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - '**.uid' + /usr/bin/sed s/security-context-10689/namespace/g + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.0y9u7eAa6E + cat /tmp/tmp.ZIav80WUBZ + rm /tmp/tmp.0y9u7eAa6E /tmp/tmp.ZIav80WUBZ + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-pvc-sec-context.yml /tmp/tmp.woNsh8iaMb/job.batch_restore-job-restore-pvc-sec-context.yml + desc 'run s3 backup' + set +o xtrace ----------------------------------------------------------------------------------- run s3 backup 
----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MOHkfOoRHK ++ mktemp + local LAST_ERR=/tmp/tmp.r4PiOfB3yS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MOHkfOoRHK secret/minio-secret unchanged + cat /tmp/tmp.r4PiOfB3yS + rm /tmp/tmp.MOHkfOoRHK /tmp/tmp.r4PiOfB3yS + return 0 + start_minio + deploy_helm security-context-10689 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add percona https://percona-charts.storage.googleapis.com/ "percona" already exists with the same configuration, skipping + helm repo add minio https://helm.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "minio" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 8.0.5 --set accessKey=some-access-key --set secretKey=some-secret-key --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set securityContext.enabled=false --set persistence.size=2G --set environment.MINIO_REGION=us-east-1 --set environment.MINIO_HTTP_TRACE=/tmp/trace.log minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 8.0.5 --set accessKey=some-access-key --set secretKey=some-secret-key --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set securityContext.enabled=false --set persistence.size=2G --set environment.MINIO_REGION=us-east-1 --set environment.MINIO_HTTP_TRACE=/tmp/trace.log minio/minio NAME: minio-service LAST DEPLOYED: Thu Apr 14 12:35:42 2022 NAMESPACE: security-context-10689 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: Minio can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.security-context-10689.svc.cluster.local To access Minio from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace security-context-10689 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace security-context-10689 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access Minio server on http://localhost:9000. Follow the below steps to connect to Minio server with mc client: 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide 2. 
Get the ACCESS_KEY=$(kubectl get secret minio-service -o jsonpath="{.data.accesskey}" | base64 --decode) and the SECRET_KEY=$(kubectl get secret minio-service -o jsonpath="{.data.secretkey}" | base64 --decode) 3. mc alias set minio-service-local http://localhost:9000 "$ACCESS_KEY" "$SECRET_KEY" --api s3v4 4. mc ls minio-service-local Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17 + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BJMUbkXXGU +++ mktemp ++ local LAST_ERR=/tmp/tmp.AfVZ29FuPM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.BJMUbkXXGU ++ cat /tmp/tmp.AfVZ29FuPM ++ rm /tmp/tmp.BJMUbkXXGU /tmp/tmp.AfVZ29FuPM ++ return 0 + MINIO_POD=minio-service-569fc47cb6-7xrn9 + wait_pod minio-service-569fc47cb6-7xrn9 + local pod=minio-service-569fc47cb6-7xrn9 + local max_retry=480 + local ns= ++ echo minio-service-569fc47cb6-7xrn9 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace minio-service-569fc47cb6-7xrn9.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.bVEvNkMnjy ++ mktemp + local LAST_ERR=/tmp/tmp.r47F2IwnT2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.bVEvNkMnjy make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.r47F2IwnT2 If you don't see a command prompt, try pressing enter. 
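(Editorial aside, not part of the captured trace: the PVC backup/restore cycle traced above, and the S3 variant that follows once the minio bucket exists, both reduce to the same apply-and-poll pattern. The sketch below is a condensed, hand-written restatement under assumptions — the polling internals of wait_backup / wait_backup_restore are hidden behind set +o xtrace, so the jsonpath field, sleep interval, and the "normalize" stand-in for the yq/sed scrubbing pipeline are illustrative, not the harness code itself.)

# illustrative sketch only -- helper names and status fields are assumptions
run_backup_restore_cycle() {
    local backup=$1 restore=$2          # e.g. on-demand-backup-pvc / restore-pvc
    kubectl apply -f "e2e-tests/security-context/conf/sec-context-${backup}.yml"
    # poll the backup CR until it reports Succeeded (field name assumed)
    until [[ $(kubectl get pxc-backup "$backup" -o jsonpath='{.status.state}') == Succeeded ]]; do sleep 10; done
    # the generated backup job must match the expected manifest, security contexts included;
    # "normalize" stands for the long yq d / sed scrubbing pipeline shown in the trace
    diff -u "compare/job.batch_xb-${backup}.yml" <(kubectl get job.batch "xb-${backup}" -o yaml | normalize)
    kubectl apply -f "e2e-tests/security-context/conf/sec-context-${restore}.yml"
    until [[ $(kubectl get pxc-restore "$restore" -o jsonpath='{.status.state}') == Succeeded ]]; do sleep 10; done
    diff -u "compare/job.batch_restore-job-${restore}-sec-context.yml" \
        <(kubectl get job.batch "restore-job-${restore}-sec-context" -o yaml | normalize)
}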
Error attaching, falling back to logs: Internal error occurred: error attaching to container: container is in CONTAINER_EXITED state + rm /tmp/tmp.bVEvNkMnjy /tmp/tmp.r47F2IwnT2 + return 0 + wait_cluster_consistency sec-context 3 2 + local cluster_name=sec-context + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + sleep 7 ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LL94WljxEr +++ mktemp ++ local LAST_ERR=/tmp/tmp.uklX3PMs9o ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.LL94WljxEr ++ cat /tmp/tmp.uklX3PMs9o ++ rm /tmp/tmp.LL94WljxEr /tmp/tmp.uklX3PMs9o ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X1dd1XzOo0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gpFTfxjkva ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.X1dd1XzOo0 ++ cat /tmp/tmp.gpFTfxjkva ++ rm /tmp/tmp.X1dd1XzOo0 /tmp/tmp.gpFTfxjkva ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine sec-context +++ local cluster_name=sec-context ++++ get_proxy sec-context ++++ local target_cluster=sec-context +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.WftiHZtIg1 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ZvSb6x7R2f +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.WftiHZtIg1 +++++ cat /tmp/tmp.ZvSb6x7R2f +++++ rm /tmp/tmp.WftiHZtIg1 /tmp/tmp.ZvSb6x7R2f +++++ return 0 ++++ [[ '' == \t\r\u\e ]] +++++ kubectl_bin get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.3QZzHY8nEw ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.2zDA3GdPCF +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc sec-context -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.3QZzHY8nEw +++++ cat /tmp/tmp.2zDA3GdPCF +++++ rm /tmp/tmp.3QZzHY8nEw /tmp/tmp.2zDA3GdPCF +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo sec-context-proxysql ++++ return +++ local cluster_proxy=sec-context-proxysql +++ echo proxysql ++ kubectl_bin get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NQqKscmA4j +++ mktemp ++ local LAST_ERR=/tmp/tmp.MCb1trHrdH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc sec-context -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.NQqKscmA4j ++ cat /tmp/tmp.MCb1trHrdH ++ rm /tmp/tmp.NQqKscmA4j /tmp/tmp.MCb1trHrdH ++ return 0 + [[ 2 == \2 ]] + backup=on-demand-backup-s3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-on-demand-backup-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.lQkRDa8Ojr ++ mktemp + local LAST_ERR=/tmp/tmp.k8PGtLqIvN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-on-demand-backup-s3.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat 
/tmp/tmp.lQkRDa8Ojr perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-s3 created + cat /tmp/tmp.k8PGtLqIvN + rm /tmp/tmp.lQkRDa8Ojr /tmp/tmp.k8PGtLqIvN + return 0 + wait_backup on-demand-backup-s3 + local backup=on-demand-backup-s3 + set +o xtrace on-demand-backup-s3.............Succeeded + compare_kubectl job.batch/xb-on-demand-backup-s3 + local resource=job.batch/xb-on-demand-backup-s3 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3.yml + local new_result=/tmp/tmp.woNsh8iaMb/job.batch_xb-on-demand-backup-s3.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ bc -l ++ echo '1.20 >= 1.21' + '[' 0 -eq 1 ']' + return 1 + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."percona.com/*"' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.creationTimestamp' + yq d - spec.volumeMode + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.healthCheckNodePort' + yq d - '**.storageClassName' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.finalizers' + yq d - '**.nodePort' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.uid' + yq d - '**.volumeName' + yq d - '**.imagePullSecrets' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.namespace' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.enableServiceLinks' + yq d - '**.controller-uid' + yq d - status + yq d - '**.preemptionPolicy' + yq d - '**.creationTimestamp' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==suffix)' + yq d - spec.ipFamilyPolicy + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - spec.ipFamilies + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - metadata.managedFields + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed s/security-context-10689/namespace/g + kubectl_bin get -o yaml job.batch/xb-on-demand-backup-s3 ++ mktemp + local LAST_OUT=/tmp/tmp.U6l8NIEt5G ++ mktemp + local LAST_ERR=/tmp/tmp.MxtJmq2CJi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml job.batch/xb-on-demand-backup-s3 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.U6l8NIEt5G + cat /tmp/tmp.MxtJmq2CJi + rm /tmp/tmp.U6l8NIEt5G /tmp/tmp.MxtJmq2CJi + return 0 + diff -u 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_xb-on-demand-backup-s3.yml /tmp/tmp.woNsh8iaMb/job.batch_xb-on-demand-backup-s3.yml + desc 'run s3 restore' + set +o xtrace ----------------------------------------------------------------------------------- run s3 restore ----------------------------------------------------------------------------------- + restore=restore-s3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-restore-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.atHxrcPanf ++ mktemp + local LAST_ERR=/tmp/tmp.9bTJZ208Kz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/conf/sec-context-restore-s3.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.atHxrcPanf perconaxtradbclusterrestore.pxc.percona.com/restore-s3 created + cat /tmp/tmp.9bTJZ208Kz + rm /tmp/tmp.atHxrcPanf /tmp/tmp.9bTJZ208Kz + return 0 + wait_backup_restore restore-s3 + local backup_name=restore-s3 + set +o xtrace restore-s3.........................................................................................................................................................Succeeded + compare_kubectl job.batch/restore-job-restore-s3-sec-context + local resource=job.batch/restore-job-restore-s3-sec-context + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context.yml + local new_result=/tmp/tmp.woNsh8iaMb/job.batch_restore-job-restore-s3-sec-context.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml job.batch/restore-job-restore-s3-sec-context + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.healthCheckNodePort' + yq d - '**."percona.com/*"' + yq d - '**.nodePort' + yq d - '**.storageClassName' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==NAMESPACE)' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.finalizers' + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 
'**.imagePullSecrets' + yq d - '**.enableServiceLinks' + yq d - spec.ipFamilies + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.controller-uid' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - '**.preemptionPolicy' + yq d - status + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/security-context-10689/namespace/g + yq d - '**.creationTimestamp' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.ttoFheMRSx ++ mktemp + local LAST_ERR=/tmp/tmp.gcNQC2BAuo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml job.batch/restore-job-restore-s3-sec-context + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ttoFheMRSx + cat /tmp/tmp.gcNQC2BAuo + rm /tmp/tmp.ttoFheMRSx /tmp/tmp.gcNQC2BAuo + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/security-context/compare/job.batch_restore-job-restore-s3-sec-context.yml /tmp/tmp.woNsh8iaMb/job.batch_restore-job-restore-s3-sec-context.yml + [[ -n '' ]] + destroy security-context-10689 + local namespace=security-context-10689 + local ignore_logs=false + [[ false == \f\a\l\s\e ]] + grep -v level=info + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.woNsh8iaMb/operator.log ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + grep -v 'the object has been modified' +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nr1qGOZC77 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QvJpJpZmPb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.Nr1qGOZC77 ++ cat /tmp/tmp.QvJpJpZmPb ++ rm /tmp/tmp.Nr1qGOZC77 /tmp/tmp.QvJpJpZmPb ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-n47nz ++ mktemp + local LAST_OUT=/tmp/tmp.5FK3iK7bKX ++ mktemp + local LAST_ERR=/tmp/tmp.scePVFGaqy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-n47nz + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.5FK3iK7bKX + cat /tmp/tmp.scePVFGaqy + rm /tmp/tmp.5FK3iK7bKX /tmp/tmp.scePVFGaqy + return 0 I0414 12:17:50.255690 1 request.go:645] Throttling request took 1.041545559s, request: GET:https://10.123.240.1:443/apis/pxc.percona.com/v1-7-0?timeout=32s I0414 12:19:37.952589 1 request.go:645] Throttling request took 1.038249268s, request: GET:https://10.123.240.1:443/apis/cloud.google.com/v1beta1?timeout=32s {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: command terminated with exit code 1 / / ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 0\nERROR (line:581) : ProxySQL connection 
check failed. \n-- Could not connect to ProxySQL at localhost:6032 \n-- Please check the ProxySQL connection parameters and status.\n","errorVerbose":"exec syncusers: command terminated with exit code 1 / / ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 0\nERROR (line:581) : ProxySQL connection check failed. \n-- Could not connect to ProxySQL at localhost:6032 \n-- Please check the ProxySQL connection parameters and status.\n\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"error",,"caller":"pxc/controller.go:1142","msg":"sync users","error":"exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / ","errorVerbose":"exec syncusers: unable to upgrade connection: container not found (\"proxysql\") / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:491\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1140\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1581","stacktrace":"github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:1142"} {"level":"info",,"caller":"pxcbackup/controller.go:212","msg":"Creating a new volume for backup","Namespace":"security-context-10689","Name":"xb-on-demand-backup-pvc"} {"level":"info",,"caller":"pxcbackup/controller.go:248","msg":"Created a new backup job","Namespace":"security-context-10689","Name":"xb-on-demand-backup-pvc"} {"level":"info",,"caller":"pxcbackup/controller.go:248","msg":"Created a new backup job","Namespace":"security-context-10689","Name":"xb-on-demand-backup-s3"} {"level":"info",,"caller":"pxc/backup.go:87","msg":"Creating or updating backup job","name":"adc5a-each-hour-pvc","schedule":"0 */1 * * *"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"failed to connect to pod sec-context-pxc-0: dial tcp 10.120.0.9:33062: connect: connection refused"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"failed to connect to pod sec-context-pxc-0: dial tcp: lookup sec-context-pxc-0.sec-context-pxc.security-context-10689 on 10.123.240.10:53: no such host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"failed to ensure cluster readonly status: connect to pod sec-context-pxc-1: dial 
tcp: lookup sec-context-pxc-1.sec-context-pxc.security-context-10689 on 10.123.240.10:53: no such host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"failed to ensure cluster readonly status: connect to pod sec-context-pxc-2: dial tcp: lookup sec-context-pxc-2.sec-context-pxc.security-context-10689 on 10.123.240.10:53: no such host"} {"level":"info",,"caller":"pxc/controller.go:468","msg":"reconcile replication error","err":"get primary pxc pod: not found"} {"level":"info",,"caller":"pxc/replication.go:145","msg":"Unable to find primary pod for replication. No pod with name or ip like this","primary name":"sec-context-pxc-0.sec-context-pxc.security-context-10689.svc.cluster.local"} {"level":"info",,"caller":"pxcrestore/controller.go:121","msg":"backup restore request"} {"level":"info",,"caller":"pxcrestore/controller.go:181","msg":"stopping cluster","cluster":"sec-context"} {"level":"info",,"caller":"pxcrestore/controller.go:193","msg":"starting restore","cluster":"sec-context","backup":"on-demand-backup-pvc"} {"level":"info",,"caller":"pxcrestore/controller.go:193","msg":"starting restore","cluster":"sec-context","backup":"on-demand-backup-s3"} {"level":"info",,"caller":"pxcrestore/controller.go:205","msg":"starting cluster","cluster":"sec-context"} {"level":"info",,"caller":"pxcrestore/controller.go:243","msg":"You can view xtrabackup log:\n$ kubectl logs job/restore-job-restore-pvc-sec-context\nIf everything is fine, you can cleanup the job:\n$ kubectl delete pxc-restore/restore-pvc\n"} {"level":"info",,"caller":"pxcrestore/controller.go:243","msg":"You can view xtrabackup log:\n$ kubectl logs job/restore-job-restore-s3-sec-context\nIf everything is fine, you can cleanup the job:\n$ kubectl delete pxc-restore/restore-s3\n"} {"level":"info",,"caller":"pxc/version.go:328","msg":"update PXC version (fetched from db)","new version":"8.0.27-18.1"} {"level":"info",,"caller":"v1/pxc_types.go:874","msg":"ProxySQL size will be changed from 1 to 2 due to safe config"} {"level":"info",,"caller":"v1/pxc_types.go:875","msg":"Set allowUnsafeConfigurations=true to disable safe configuration"} {"level":"info",,"logger":"cmd","msg":"Git commit: 706f792ae47c369cb3556faff186b6873a8a247f Git branch: PR-1125-706f792a Build time: 2022-04-14T12:06:57Z"} {"level":"info",,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} {"level":"info",,"logger":"cmd","msg":"Go Version: go1.17.9"} {"level":"info",,"logger":"cmd","msg":"operator-sdk Version: v0.19.4"} {"level":"info",,"logger":"cmd","msg":"Registering Components."} {"level":"info",,"logger":"cmd","msg":"Runs on","platform":"kubernetes","version":"v1.20.15-gke.4100"} {"level":"info",,"logger":"cmd","msg":"Starting the Cmd."} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting Controller"} 
{"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} {"level":"info",,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"registering webhook","path":"/validate-percona-xtradbcluster"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"serving webhook server","host":"","port":9443} {"level":"info",,"logger":"controller-runtime.webhook.webhooks","msg":"starting webhook server"} {"level":"info",,"logger":"leader","msg":"Became the leader."} {"level":"info",,"logger":"leader","msg":"No pre-existing lock was found."} {"level":"info",,"logger":"leader","msg":"Trying to become the leader."} + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n security-context-10689 sec-context --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/sec-context patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.MXDtkkWYNM ++ mktemp + local LAST_ERR=/tmp/tmp.bm3cgSjrgT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MXDtkkWYNM perconaxtradbcluster.pxc.percona.com "sec-context" deleted + cat /tmp/tmp.bm3cgSjrgT + rm /tmp/tmp.MXDtkkWYNM /tmp/tmp.bm3cgSjrgT + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.ueEtCTYKU1 ++ mktemp + local LAST_ERR=/tmp/tmp.5CeqOw9A2M + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ueEtCTYKU1 perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-pvc" deleted perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-s3" deleted + cat /tmp/tmp.5CeqOw9A2M + rm /tmp/tmp.ueEtCTYKU1 /tmp/tmp.5CeqOw9A2M + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.4UibCYQxjY ++ mktemp + local LAST_ERR=/tmp/tmp.jSMW1J9zXe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.4UibCYQxjY perconaxtradbclusterrestore.pxc.percona.com "restore-pvc" deleted perconaxtradbclusterrestore.pxc.percona.com "restore-s3" deleted + cat /tmp/tmp.jSMW1J9zXe + rm /tmp/tmp.4UibCYQxjY /tmp/tmp.jSMW1J9zXe + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.fplM7qTbFm ++ mktemp + local 
LAST_ERR=/tmp/tmp.BDEsE9RdCG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.fplM7qTbFm validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.BDEsE9RdCG + rm /tmp/tmp.fplM7qTbFm /tmp/tmp.BDEsE9RdCG + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml namespace "cert-manager" deleted + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace security-context-10689 + rm -rf /tmp/tmp.woNsh8iaMb ++ mktemp + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FyaakKj3eE + local LAST_OUT=/tmp/tmp.cwt750Sa6n ++ mktemp + local LAST_ERR=/tmp/tmp.9JHIqF2Y3n + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.pvCNtTqsnN + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace pxc-operator + kubectl delete --grace-period=0 --force=true namespace security-context-10689
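(Editorial aside, not part of the captured trace: for reference, the destroy/teardown sequence that closes the run above, condensed into a hand-written sketch. The resource names, webhook, cert-manager manifest URL, and namespaces are the ones visible in the trace; the helper itself is not reproduced verbatim and retry/error handling is omitted.)

# illustrative teardown sketch -- mirrors the traced commands, assumptions noted above
teardown() {
    local ns=$1 operator_ns=$2          # security-context-10689 / pxc-operator
    # drop finalizers so the custom resources can be deleted immediately
    kubectl get pxc --all-namespaces -o wide | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
    kubectl delete pxc --all --all-namespaces
    kubectl delete pxc-backup --all --all-namespaces
    kubectl delete pxc-restore --all --all-namespaces
    # remove the operator webhook and cert-manager, then force-drop both namespaces
    kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
    kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml || true
    kubectl delete --grace-period=0 --force=true namespace "$ns" "$operator_ns"
}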