[dev-v2.10] Rebase monitoring (#4384)

Co-authored-by: joshmeranda <joshua.meranda@gmail.com>
pull/4399/head
Josh Meranda 2024-09-04 14:40:18 -04:00 committed by GitHub
parent fb92195751
commit 3278cc7999
GPG Key ID: B5690EEEBB952194
1279 changed files with 110084 additions and 1105 deletions

View File

@@ -7,4 +7,4 @@ apiVersion: v2
description: Installs the CRDs for rancher-monitoring.
name: rancher-monitoring-crd
type: application
version: 105.0.0-rc1+up57.0.3
version: 105.0.0+up57.0.3

View File

@@ -0,0 +1,10 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: cattle-monitoring-system
  catalog.cattle.io/release-name: rancher-monitoring-crd
apiVersion: v2
description: Installs the CRDs for rancher-monitoring.
name: rancher-monitoring-crd
type: application
version: 105.1.0-rc1+up61.3.2

View File

@@ -0,0 +1,24 @@
# rancher-monitoring-crd
A Rancher chart that installs the CRDs used by rancher-monitoring.
## How does this chart work?
This chart marshals all of the CRD files placed in the `crd-manifest` directory into a ConfigMap that is installed onto a cluster alongside relevant RBAC (ServiceAccount, ClusterRoleBinding, ClusterRole, and PodSecurityPolicy).
Once the relevant dependent resources are installed / upgraded / rolled back, this chart executes a post-install / post-upgrade / post-rollback Job that:
- Patches any existing versions of the CRDs contained within the `crd-manifest` on the cluster to set `spec.preserveUnknownFields=false`; this step is required since, based on [Kubernetes docs](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning) and a [known workaround](https://github.com/kubernetes-sigs/controller-tools/issues/476#issuecomment-691519936), such CRDs cannot be upgraded normally from `apiextensions.k8s.io/v1beta1` to `apiextensions.k8s.io/v1`.
- Runs a `kubectl apply` on the CRDs that are contained within the crd-manifest ConfigMap to upgrade CRDs in the cluster (see the illustrative commands below)
On an uninstall, this chart executes a separate post-delete Job that:
- Patches any existing versions of the CRDs contained within `crd-manifest` on the cluster to set `metadata.finalizers=[]`
- Runs a `kubectl delete` on the CRDs that are contained within the crd-manifest ConfigMap to clean up the CRDs from the cluster
Note: If the relevant CRDs already existed in the cluster at the time of install, this chart will absorb ownership of the lifecycle of those CRDs; therefore, on a `helm uninstall`, those CRDs will also be removed from the cluster alongside this chart.
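For illustration only, the patch and apply steps described above correspond roughly to the commands below; the CRD name is a hypothetical example, and in the chart these steps are run by the hook Job under its own ServiceAccount rather than by hand.

```shell
# Example of the preserveUnknownFields patch applied to a pre-existing CRD (name is hypothetical)
kubectl patch crd prometheuses.monitoring.coreos.com --type=merge \
  -p '{"spec":{"preserveUnknownFields":false}}'

# Apply every CRD unpacked from the crd-manifest ConfigMap
kubectl apply -Rf /etc/crd
```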
## Why can't we just place the CRDs in the templates/ directory of the main chart?
In Helm today, you cannot declare a CRD and declare a resource of that CRD's kind in templates/ without encountering a failure on render.
## [Helm 3] Why can't we just place the CRDs in the crds/ directory of the main chart?
The Helm 3 `crds/` directory only supports the installation of CRDs; unlike this chart, it does not support upgrading or removing them.

View File

@@ -0,0 +1,30 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters add a default taint to Linux nodes,
so add the Linux tolerations below to workloads that should be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
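These helpers are consumed by the templates below (for example in the Job spec's `image`, `nodeSelector`, and `tolerations` fields). A quick way to see what they expand to for a given Kubernetes version is to render the chart locally; the chart path and kube-version below are assumptions for illustration.

```shell
# Render the chart against a specific Kubernetes version and inspect the
# nodeSelector/tolerations emitted by the helpers above
helm template rancher-monitoring-crd ./charts/rancher-monitoring-crd \
  --kube-version 1.28.0 | grep -A 4 'nodeSelector:'
```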

View File

@@ -0,0 +1,102 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Chart.Name }}-create
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Chart.Name }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade, post-rollback
    "helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed
spec:
  template:
    metadata:
      name: {{ .Chart.Name }}-create
      labels:
        app: {{ .Chart.Name }}
    spec:
      serviceAccountName: {{ .Chart.Name }}-manager
      securityContext:
        runAsNonRoot: false
        runAsUser: 0
      containers:
      - name: create-crds
        image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: IfNotPresent
        command:
        - /bin/sh
        - -c
        - >
          echo "Applying CRDs...";
          mkdir -p /etc/crd;
          base64 -d /etc/config/crd-manifest.tgz.b64 | tar -xzv -C /etc/crd;
          kubectl replace -Rf /etc/crd || kubectl create -Rf /etc/crd;
          echo "Done!"
        volumeMounts:
        - name: crd-manifest
          readOnly: true
          mountPath: /etc/config
      restartPolicy: OnFailure
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      volumes:
      - name: crd-manifest
        configMap:
          name: {{ .Chart.Name }}-manifest
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Chart.Name }}-delete
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Chart.Name }}
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed
spec:
  template:
    metadata:
      name: {{ .Chart.Name }}-delete
      labels:
        app: {{ .Chart.Name }}
    spec:
      serviceAccountName: {{ .Chart.Name }}-manager
      securityContext:
        runAsNonRoot: false
        runAsUser: 0
      containers:
      - name: delete-crds
        image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: IfNotPresent
        command:
        - /bin/sh
        - -c
        - >
          echo "Deleting CRDs...";
          mkdir -p /etc/crd;
          base64 -d /etc/config/crd-manifest.tgz.b64 | tar -xzv -C /etc/crd;
          kubectl delete --ignore-not-found=true -Rf /etc/crd;
        volumeMounts:
        - name: crd-manifest
          readOnly: true
          mountPath: /etc/config
      restartPolicy: OnFailure
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      volumes:
      - name: crd-manifest
        configMap:
          name: {{ .Chart.Name }}-manifest

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Chart.Name }}-manifest
  namespace: {{ .Release.Namespace }}
data:
  crd-manifest.tgz.b64:
    {{- .Files.Get "files/crd-manifest.tgz" | b64enc | indent 4 }}
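The template above embeds `files/crd-manifest.tgz` into the ConfigMap as base64, and the hook Jobs later decode and unpack it. How the tarball is produced is outside this diff; as an assumption for illustration, it would be built from the `crd-manifest` directory mentioned in the README before the chart is packaged.

```shell
# Hypothetical packaging step, run from the chart directory:
# bundle the CRD manifests so .Files.Get can embed them at render time
tar -czf files/crd-manifest.tgz -C crd-manifest .
```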

View File

@@ -0,0 +1,76 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Chart.Name }}-manager
  labels:
    app: {{ .Chart.Name }}-manager
rules:
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs: ['create', 'get', 'patch', 'delete', 'update', 'list']
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ .Chart.Name }}-manager
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Chart.Name }}-manager
  labels:
    app: {{ .Chart.Name }}-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Chart.Name }}-manager
subjects:
- kind: ServiceAccount
  name: {{ .Chart.Name }}-manager
  namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Chart.Name }}-manager
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Chart.Name }}-manager
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ .Chart.Name }}-manager
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Chart.Name }}-manager
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
  volumes:
  - 'configMap'
  - 'secret'
{{- end }}

View File

@@ -0,0 +1,17 @@
# Default values for rancher-monitoring-crd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    psp:
      enabled: false
    systemDefaultRegistry: ""
image:
  repository: rancher/shell
  tag: v0.2.1
nodeSelector: {}
tolerations: []
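Any of these defaults can be overridden at install time. The chart path below is an assumption for illustration; the namespace matches the `catalog.cattle.io/namespace` annotation in Chart.yaml.

```shell
# Example install that overrides the defaults above
# (PSP resources are only meaningful on clusters that still support PodSecurityPolicy)
helm install rancher-monitoring-crd ./charts/rancher-monitoring-crd \
  --namespace cattle-monitoring-system --create-namespace \
  --set global.cattle.systemDefaultRegistry=registry.example.com \
  --set global.cattle.psp.enabled=true
```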

View File

@@ -123,4 +123,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 105.0.0-rc1+up57.0.3
version: 105.0.0+up57.0.3

Some files were not shown because too many files have changed in this diff.