Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1781/e2e-tests/logs/security-context-8-0.log
WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1
No resources found
+ kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
No resources found
No resources found
No resources found
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces pxc-operator
-----------------------------------------------------------------------------------
Error from server (NotFound): namespaces "pxc-operator" not found
namespace/pxc-operator -
Error from server (NotFound): namespaces "pxc-operator" not found
-----------------------------------------------------------------------------------
create namespace pxc-operator
-----------------------------------------------------------------------------------
namespace "gmp-public" deleted
namespace "gmp-system" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
namespace/pxc-operator created
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1781-4641617b-1-cluster4" modified.
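The cleanup phase strips finalizers from leftover custom resources so that objects from earlier runs can be deleted even when their controller is gone; the "no name was specified" errors appear because the patch is invoked with an empty name list when `kubectl get` finds nothing. A minimal sketch of the pattern, assuming a $ns variable for the target namespace:

  # Clear finalizers on every PXC custom resource so deletion can proceed.
  for cr in $(kubectl get pxc -n "$ns" -o name); do
    kubectl patch "$cr" -n "$ns" --type=merge -p '{"metadata":{"finalizers":[]}}'
  done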
-----------------------------------------------------------------------------------
start PXC operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied
clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged
serviceaccount/percona-xtradb-cluster-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged
deployment.apps/percona-xtradb-cluster-operator created
service/percona-xtradb-cluster-operator created
pod/percona-xtradb-cluster-operator-86c87db468-pxfg9 condition met
pod/percona-xtradb-cluster-operator-86c87db468-pxfg9 condition met
percona-xtradb-cluster-operator-86c87db468-pxfg9.Ok
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces security-context-245
-----------------------------------------------------------------------------------
Error from server (NotFound): namespaces "security-context-245" not found
namespace/security-context-245 -
Error from server (NotFound): namespaces "security-context-245" not found
-----------------------------------------------------------------------------------
create namespace security-context-245
-----------------------------------------------------------------------------------
namespace "gmp-public" deleted
namespace "gmp-system" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
namespace/security-context-245 created
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1781-4641617b-1-cluster4" modified.
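Installing the operator amounts to server-side-applying its CRDs, applying RBAC and the deployment, then waiting for the pod's Ready condition (the "condition met" lines above). A hedged sketch of the usual commands; manifest paths and the label selector are assumptions:

  kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
  kubectl apply -f deploy/rbac.yaml -f deploy/operator.yaml
  # Block until the operator pod reports the Ready condition.
  kubectl wait --for=condition=Ready pod \
    -l app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=300s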
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
namespace/cert-manager created
namespace/cert-manager labeled
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view created
clusterrole.rbac.authorization.k8s.io/cert-manager-view created
clusterrole.rbac.authorization.k8s.io/cert-manager-edit created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created
role.rbac.authorization.k8s.io/cert-manager:leaderelection created
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
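The cert-manager objects above come from the project's static install manifest; a sketch of the equivalent manual install (the version is an assumption, the test pins its own):

  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.0/cert-manager.yaml
  # Wait for all three deployments (controller, cainjector, webhook) to become available.
  kubectl -n cert-manager wait --for=condition=Available deployment --all --timeout=300s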
serviceaccount/percona-xtradb-cluster-operator-workload created
-----------------------------------------------------------------------------------
create first PXC cluster
-----------------------------------------------------------------------------------
secret/my-cluster-secrets created
deployment.apps/pxc-client created
perconaxtradbcluster.pxc.percona.com/sec-context created
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
error: no matching resources found
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
Error from server (NotFound): pods "sec-context-proxysql-0" not found
sec-context-proxysql-0............Ok
-----------------------------------------------------------------------------------
wait for running cluster
-----------------------------------------------------------------------------------
pod/sec-context-pxc-0 condition met
sec-context-pxc-0.Ok
pod/sec-context-pxc-1 condition met
sec-context-pxc-1.Ok
pod/sec-context-pxc-2 condition met
sec-context-pxc-2.Ok
-----------------------------------------------------------------------------------
write data
-----------------------------------------------------------------------------------
pod/pxc-client-65c795cbdf-5fr7h condition met
pxc-client-65c795cbdf-5fr7h.Ok
pod/pxc-client-65c795cbdf-5fr7h condition met
pxc-client-65c795cbdf-5fr7h.Ok
pod/pxc-client-65c795cbdf-5fr7h condition met
pxc-client-65c795cbdf-5fr7h.Ok
pod/pxc-client-65c795cbdf-5fr7h condition met
pxc-client-65c795cbdf-5fr7h.Ok
pod/pxc-client-65c795cbdf-5fr7h condition met
pxc-client-65c795cbdf-5fr7h.Ok
Unable to use a TTY - input is not a terminal or the right kind of file
-----------------------------------------------------------------------------------
check if service and statefulset created with expected config
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
compare statefulset/sec-context-pxc-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
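The write-data step execs SQL into the cluster through the pxc-client deployment; the "Unable to use a TTY" warning is the usual artifact of kubectl exec -i without a terminal. A sketch of the pattern, with an assumed proxysql service name and illustrative credentials and SQL:

  # The root password would come from the my-cluster-secrets Secret; table/values are illustrative.
  kubectl -n security-context-245 exec -i deploy/pxc-client -- \
    mysql -h sec-context-proxysql -uroot -p"$ROOT_PASSWORD" \
    -e "CREATE DATABASE IF NOT EXISTS myApp; CREATE TABLE IF NOT EXISTS myApp.myApp (id int PRIMARY KEY); INSERT INTO myApp.myApp VALUES (100500);"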
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
compare statefulset/sec-context-proxysql-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
change security context in PXC cluster
-----------------------------------------------------------------------------------
perconaxtradbcluster.pxc.percona.com/sec-context configured
-----------------------------------------------------------------------------------
check if service and statefulset changed to expected config
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
compare statefulset/sec-context-pxc--changes
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
compare statefulset/sec-context-proxysql--changes
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
wait cluster consistency
-----------------------------------------------------------------------------------
waiting for cluster readiness
waiting for cluster readiness
waiting for cluster readiness
waiting for cluster readiness
waiting for cluster readiness
waiting for cluster readiness
-----------------------------------------------------------------------------------
run pvc backup
-----------------------------------------------------------------------------------
perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-pvc created
on-demand-backup-pvc...................Succeeded
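on-demand-backup-pvc is a PerconaXtraDBClusterBackup object pointing at the cluster's PVC-backed storage; a minimal sketch consistent with the names in this run (the storage name is an assumption about the cluster spec):

  apiVersion: pxc.percona.com/v1
  kind: PerconaXtraDBClusterBackup
  metadata:
    name: on-demand-backup-pvc
  spec:
    pxcCluster: sec-context
    storageName: pvc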
-----------------------------------------------------------------------------------
compare job.batch/xb-on-demand-backup-pvc-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1781-4641617b-1-cluster4" modified.
-----------------------------------------------------------------------------------
run pvc restore
-----------------------------------------------------------------------------------
perconaxtradbclusterrestore.pxc.percona.com/restore-pvc created
Error from server (NotFound): pods "restore-src-restore-pvc-sec-context" not found
restore-src-restore-pvc-sec-context..............................Ok
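restore-pvc is a PerconaXtraDBClusterRestore object referencing the finished backup; a minimal sketch consistent with the names above:

  apiVersion: pxc.percona.com/v1
  kind: PerconaXtraDBClusterRestore
  metadata:
    name: restore-pvc
  spec:
    pxcCluster: sec-context
    backupName: on-demand-backup-pvc

For PVC-based restores the operator also starts a donor pod (restore-src-...) that serves the backup files to the cluster; the manifest the test captured for that pod follows.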
apiVersion: v1
kind: Pod
metadata:
  annotations:
    openshift.io/scc: privileged
  creationTimestamp: "2024-08-08T03:06:21Z"
  labels:
    name: restore-src-restore-pvc-sec-context
  name: restore-src-restore-pvc-sec-context
  namespace: security-context-245
  ownerReferences:
  - apiVersion: pxc.percona.com/v1
    controller: true
    kind: PerconaXtraDBClusterRestore
    name: restore-pvc
    uid: faad7ec9-9e45-4799-83ac-ac895e612222
  resourceVersion: "38063"
  uid: c46b7152-ea98-4cc5-b5bd-36e027af2189
spec:
  containers:
  - command:
    - recovery-pvc-donor.sh
    image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
    imagePullPolicy: Always
    name: ncat
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /backup
      name: backup
    - mountPath: /etc/mysql/ssl
      name: ssl
    - mountPath: /etc/mysql/ssl-internal
      name: ssl-internal
    - mountPath: /etc/mysql/vault-keyring-secret
      name: vault-keyring-secret
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-rldwz
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: gke-jen-pxc-1781-4641617-default-pool-e0b3df4a-v7lb
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    fsGroup: 1001
    supplementalGroups:
    - 1001
    - 1002
    - 1003
  serviceAccount: percona-xtradb-cluster-operator-workload
  serviceAccountName: percona-xtradb-cluster-operator-workload
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: backup
    persistentVolumeClaim:
      claimName: xb-on-demand-backup-pvc
  - name: ssl-internal
    secret:
      defaultMode: 420
      optional: true
      secretName: some-name-ssl-internal
  - name: ssl
    secret:
      defaultMode: 420
      optional: false
      secretName: some-name-ssl
  - name: vault-keyring-secret
    secret:
      defaultMode: 420
      optional: true
      secretName: sec-context-vault
  - name: kube-api-access-rldwz
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2024-08-08T03:06:21Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2024-08-08T03:06:30Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2024-08-08T03:06:30Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2024-08-08T03:06:21Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://5850db93133f3a1ff0e032453b925d8235dabbbc4214c315ba1327e77ba97acb
    image: docker.io/perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
    imageID: docker.io/perconalab/percona-xtradb-cluster-operator@sha256:6395019766db6118f61b8085c720d1d574c73511f84c9db5d222460f790649fb
    lastState: {}
    name: ncat
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2024-08-08T03:06:30Z"
  hostIP: 10.211.0.69
  phase: Running
  podIP: 10.195.176.58
  podIPs:
  - ip: 10.195.176.58
  qosClass: BestEffort
  startTime: "2024-08-08T03:06:21Z"
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
compare pod/restore-src-restore-pvc-sec-context-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
wait backup restore
-----------------------------------------------------------------------------------
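Note the pod-level securityContext in the manifest above (fsGroup: 1001, supplementalGroups: 1001-1003) together with the privileged ncat container: these are exactly the fields this test exercises. A quick way to spot-check them on a live pod:

  kubectl -n security-context-245 get pod restore-src-restore-pvc-sec-context \
    -o jsonpath='{.spec.securityContext}{"\n"}'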
restore-pvc.........................................................................................................Succeeded
-----------------------------------------------------------------------------------
compare job.batch/restore-job-restore-pvc-sec-context-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
run s3 backup
-----------------------------------------------------------------------------------
secret/minio-secret unchanged
"hashicorp" already exists with the same configuration, skipping
"minio" already exists with the same configuration, skipping
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "chaos-mesh" chart repository
...Successfully got an update from the "minio" chart repository
...Successfully got an update from the "hashicorp" chart repository
...Successfully got an update from the "percona" chart repository
Update Complete. ⎈Happy Helming!⎈
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
Error: uninstall: Release not loaded: minio-service: release: not found
NAME: minio-service
LAST DEPLOYED: Thu Aug  8 03:10:10 2024
NAMESPACE: security-context-245
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.security-context-245.svc.cluster.local

To access MinIO from localhost, run the below commands:

  1. export POD_NAME=$(kubectl get pods --namespace security-context-245 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
  2. kubectl port-forward $POD_NAME 9000 --namespace security-context-245

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

  1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
  2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace security-context-245 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace security-context-245 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
  3. mc ls minio-service-local

pod/minio-service-6785948d49-hcwc5 condition met
minio-service-6785948d49-hcwc5.Ok
make_bucket: operator-testing
pod "aws-cli" deleted
If you don't see a command prompt, try pressing enter.
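The make_bucket line is aws CLI output from a one-shot helper pod run against the in-cluster MinIO endpoint; a sketch of that pattern (image and credential plumbing are assumptions):

  kubectl -n security-context-245 run -i --rm aws-cli --image=amazon/aws-cli --restart=Never \
    --env=AWS_ACCESS_KEY_ID="$MINIO_USER" --env=AWS_SECRET_ACCESS_KEY="$MINIO_PASS" -- \
    --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing

The attach warning that follows is harmless: the container exits before kubectl can attach, so kubectl falls back to streaming its logs.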
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: error attaching to container: container is in CONTAINER_EXITED state
-----------------------------------------------------------------------------------
wait cluster consistency
-----------------------------------------------------------------------------------
perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-s3 created
on-demand-backup-s3...............Succeeded
-----------------------------------------------------------------------------------
compare job.batch/xb-on-demand-backup-s3-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
run s3 restore
-----------------------------------------------------------------------------------
perconaxtradbclusterrestore.pxc.percona.com/restore-s3 created
-----------------------------------------------------------------------------------
wait backup restore
-----------------------------------------------------------------------------------
restore-s3......................................................................................................................................Succeeded
-----------------------------------------------------------------------------------
compare job.batch/restore-job-restore-s3-sec-context-
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
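The S3 backup and restore above rely on a storage entry in the cluster CR's backup section plus a backup object that names it; a minimal sketch (endpointUrl, region, and the storage key are assumptions for the in-cluster MinIO):

  # Fragment of the PerconaXtraDBCluster spec:
  backup:
    storages:
      minio:
        type: s3
        s3:
          bucket: operator-testing
          credentialsSecret: minio-secret
          endpointUrl: http://minio-service:9000/
          region: us-east-1
  ---
  apiVersion: pxc.percona.com/v1
  kind: PerconaXtraDBClusterBackup
  metadata:
    name: on-demand-backup-s3
  spec:
    pxcCluster: sec-context
    storageName: minio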
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ kubectl patch pxc -n security-context-245 sec-context --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaxtradbcluster.pxc.percona.com/sec-context patched
perconaxtradbcluster.pxc.percona.com "sec-context" deleted
perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-pvc" deleted
perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-s3" deleted
perconaxtradbclusterrestore.pxc.percona.com "restore-pvc" deleted
perconaxtradbclusterrestore.pxc.percona.com "restore-s3" deleted
validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
serviceaccount "cert-manager-cainjector" deleted
serviceaccount "cert-manager" deleted
serviceaccount "cert-manager-webhook" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted
role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted
role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted
rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted
rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted
rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted
mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------