# ClusterRole, ClusterRoleBinding and ServiceAccounts have hook-failed in the
# hook-delete-policy to make it easy to rerun the whole setup even after a
# failure; otherwise the rerun fails with an "existing resource" error.
# The before-hook-creation delete policy ensures any other leftover resources
# from a previous run get deleted when the hooks run again.
# The Job resources will not be deleted, to help investigate the failure.
# Since the resources created by the operator are not managed by the chart, each
# of them must be deleted individually in separate Jobs.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: storageos-cleanup
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: storageos:cleanup
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "1"
rules:
# Using apiGroup "apps" for daemonsets fails and the permission error indicates
# that it's in group "extensions". Not sure if this is Job-specific behavior,
# because the daemonsets deployed by the operator use the "apps" apiGroup.
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - list
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  verbs:
  - delete
- apiGroups:
  - apps
  resources:
  - statefulsets
  - deployments
  - daemonsets
  verbs:
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  - rolebindings
  - clusterroles
  - clusterrolebindings
  verbs:
  - delete
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - delete
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - delete
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  - secrets
  - services
  - configmaps
  verbs:
  - delete
- apiGroups:
  - storageos.com
  resources:
  - storageosclusters
  verbs:
  - get
  - patch
  - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: storageos:cleanup
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "2"
subjects:
- name: storageos-cleanup
  kind: ServiceAccount
  namespace: {{ .Release.Namespace }}
roleRef:
  name: storageos:cleanup
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
---
{{- if .Values.cluster.create }}
# Delete the StorageOSCluster CR.
apiVersion: batch/v1
kind: Job
metadata:
  name: "storageos-storageoscluster-cleanup"
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
    "helm.sh/hook-weight": "3"
spec:
  template:
    spec:
      serviceAccountName: storageos-cleanup
      containers:
      - name: "storageos-storageoscluster-cleanup"
        image: "{{ $.Values.images.kubectl.registry }}/{{ $.Values.images.kubectl.image }}:{{ $.Values.images.kubectl.tag }}"
        command:
        - kubectl
        - -n
        - {{ template "cluster.namespace" . }}
        - delete
        - storageoscluster
        - {{ .Values.cluster.name }}
        - --ignore-not-found=true
      restartPolicy: Never
  backoffLimit: 4
---
# Wait for the operator to delete the cluster resources in response to the CR
# deletion.
apiVersion: batch/v1
kind: Job
metadata:
  name: "storageos-cleanup-wait"
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
    "helm.sh/hook-weight": "4"
spec:
  template:
    spec:
      serviceAccountName: storageos-cleanup
      containers:
      - name: "storageos-cleanup-wait"
        image: "{{ $.Values.images.kubectl.registry }}/{{ $.Values.images.kubectl.image }}:{{ $.Values.images.kubectl.tag }}"
        command:
        - "/bin/bash"
        - "-c"
        args:
        - 'while [ -n "$(kubectl get pods -n {{ template "cluster.namespace" . }} -l app=storageos --ignore-not-found)" ]; do echo "Pods still deleting"; sleep 5; done'
      restartPolicy: Never
  backoffLimit: 4
---
{{- end }}

# Separation between pre- & post-delete hooks.
# The storageoscluster CR must be deleted before the operator, so that the
# operator can handle the cluster tear down.
# Some resources must be deleted after the operator, otherwise the operator
# will re-create them.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: storageos-cleanup
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: storageos:cleanup
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "1"
rules:
# Using apiGroup "apps" for daemonsets fails and the permission error indicates
# that it's in group "extensions". Not sure if this is Job-specific behavior,
# because the daemonsets deployed by the operator use the "apps" apiGroup.
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - list
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  verbs:
  - delete
- apiGroups:
  - apps
  resources:
  - statefulsets
  - deployments
  - daemonsets
  verbs:
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  - rolebindings
  - clusterroles
  - clusterrolebindings
  verbs:
  - delete
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - delete
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - delete
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  - secrets
  - services
  - configmaps
  verbs:
  - delete
- apiGroups:
  - storageos.com
  resources:
  - storageosclusters
  verbs:
  - get
  - patch
  - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: storageos:cleanup
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
    "helm.sh/hook-weight": "2"
subjects:
- name: storageos-cleanup
  kind: ServiceAccount
  namespace: {{ .Release.Namespace }}
roleRef:
  name: storageos:cleanup
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
---
# Delete some miscellaneous operator resources that aren't cleaned up otherwise.
# This needs to happen in a post-delete hook, as otherwise the operator will
# sometimes recreate them before it is destroyed.
apiVersion: batch/v1
kind: Job
metadata:
  name: "storageos-operator-data-cleanup"
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
    "helm.sh/hook-weight": "3"
spec:
  template:
    spec:
      serviceAccountName: storageos-cleanup
      containers:
      - name: "storageos-operator-data-cleanup"
        image: "{{ $.Values.images.kubectl.registry }}/{{ $.Values.images.kubectl.image }}:{{ $.Values.images.kubectl.tag }}"
        command:
        - kubectl
        - -n
        - {{ .Release.Namespace }}
        - delete
        - configmap/operator
        - configmap/storageos-api-manager-leader
        - secret/storageos-operator-webhook
        - secret/storageos-webhook
        - --ignore-not-found=true
      restartPolicy: Never
  backoffLimit: 4
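# Usage note (a sketch of how these hooks are triggered, based on standard Helm
# hook behavior rather than anything chart-specific): because every resource in
# this file is a pre-delete or post-delete hook, the cleanup runs automatically
# when the release is deleted, for example:
#
#   helm uninstall <release-name> --namespace <namespace>
#
# The pre-delete Jobs run before the release's own resources are removed and the
# post-delete Jobs run afterwards, ordered by their hook weights. <release-name>
# and <namespace> are placeholders for the actual release name and namespace.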