Merge pull request #1040 from MonzElmasry/rke2_cis_1.6

add rke2 cis 1.6 profile and benchmark
pull/1101/head
actions 2021-04-08 20:17:42 +00:00
parent a7cc671b99
commit 331644c54b
515 changed files with 106523 additions and 0 deletions

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/release-name: rancher-cis-benchmark-crd
apiVersion: v1
description: Installs the CRDs for rancher-cis-benchmark.
name: rancher-cis-benchmark-crd
type: application
version: 1.0.402-rc00

@@ -0,0 +1,2 @@
# rancher-cis-benchmark-crd
A Rancher chart that installs the CRDs used by rancher-cis-benchmark.

@@ -0,0 +1,149 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscans.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .status.lastRunScanProfileName
name: ClusterScanProfile
type: string
- JSONPath: .status.summary.total
name: Total
type: string
- JSONPath: .status.summary.pass
name: Pass
type: string
- JSONPath: .status.summary.fail
name: Fail
type: string
- JSONPath: .status.summary.skip
name: Skip
type: string
- JSONPath: .status.summary.warn
name: Warn
type: string
- JSONPath: .status.summary.notApplicable
name: Not Applicable
type: string
- JSONPath: .status.lastRunTimestamp
name: LastRunTimestamp
type: string
- JSONPath: .spec.scheduledScanConfig.cronSchedule
name: CronSchedule
type: string
group: cis.cattle.io
names:
kind: ClusterScan
plural: clusterscans
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
scanProfileName:
nullable: true
type: string
scheduledScanConfig:
nullable: true
properties:
cronSchedule:
nullable: true
type: string
retentionCount:
type: integer
scanAlertRule:
nullable: true
properties:
alertOnComplete:
type: boolean
alertOnFailure:
type: boolean
type: object
type: object
scoreWarning:
enum:
- pass
- fail
nullable: true
type: string
type: object
status:
properties:
NextScanAt:
nullable: true
type: string
ScanAlertingRuleName:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
display:
nullable: true
properties:
error:
type: boolean
message:
nullable: true
type: string
state:
nullable: true
type: string
transitioning:
type: boolean
type: object
lastRunScanProfileName:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
observedGeneration:
type: integer
summary:
nullable: true
properties:
fail:
type: integer
notApplicable:
type: integer
pass:
type: integer
skip:
type: integer
total:
type: integer
warn:
type: integer
type: object
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,55 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanbenchmarks.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.clusterProvider
name: ClusterProvider
type: string
- JSONPath: .spec.minKubernetesVersion
name: MinKubernetesVersion
type: string
- JSONPath: .spec.maxKubernetesVersion
name: MaxKubernetesVersion
type: string
- JSONPath: .spec.customBenchmarkConfigMapName
name: customBenchmarkConfigMapName
type: string
- JSONPath: .spec.customBenchmarkConfigMapNamespace
name: customBenchmarkConfigMapNamespace
type: string
group: cis.cattle.io
names:
kind: ClusterScanBenchmark
plural: clusterscanbenchmarks
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
clusterProvider:
nullable: true
type: string
customBenchmarkConfigMapName:
nullable: true
type: string
customBenchmarkConfigMapNamespace:
nullable: true
type: string
maxKubernetesVersion:
nullable: true
type: string
minKubernetesVersion:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanprofiles.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanProfile
plural: clusterscanprofiles
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
skipTests:
items:
nullable: true
type: string
nullable: true
type: array
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,40 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanreports.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.lastRunTimestamp
name: LastRunTimestamp
type: string
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanReport
plural: clusterscanreports
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
reportJSON:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,18 @@
annotations:
catalog.cattle.io/auto-install: rancher-cis-benchmark-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: CIS Benchmark
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/os: linux
catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1
catalog.cattle.io/release-name: rancher-cis-benchmark
catalog.cattle.io/ui-component: rancher-cis-benchmark
apiVersion: v1
appVersion: v1.0.4
description: The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster
icon: https://charts.rancher.io/assets/logos/cis-kube-bench.svg
keywords:
- security
name: rancher-cis-benchmark
version: 1.0.402-rc00

@@ -0,0 +1,9 @@
# Rancher CIS Benchmark Chart
The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generates compliance reports that can be downloaded.
# Installation
```
helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system
```
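Once the chart and its companion CRD chart are installed, a scan can be launched by creating a `ClusterScan` that references one of the bundled profiles. A minimal sketch, assuming the default `cis-1.6-profile` installed by this chart:
```
kubectl apply -f - <<EOF
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: example-scan
spec:
  scanProfileName: cis-1.6-profile
EOF
```
Scan progress and results can then be followed with `kubectl get clusterscans`.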

@@ -0,0 +1,15 @@
# Rancher CIS Benchmarks
This chart enables security scanning of the cluster using [CIS (Center for Internet Security) benchmarks](https://www.cisecurity.org/benchmark/kubernetes/).
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/cis-scans/v2.5/).
This chart installs the following components:
- [cis-operator](https://github.com/rancher/cis-operator) - The cis-operator handles launching the [kube-bench](https://github.com/aquasecurity/kube-bench) tool that runs a suite of CIS tests on the nodes of your Kubernetes cluster. After scans finish, the cis-operator generates a compliance report that can be downloaded.
- Scans - A scan is a custom resource (`ClusterScan`) that defines when to trigger CIS scans on the cluster, based on the chosen profile. A report is created after the scan completes.
- Profiles - A profile is a custom resource (`ClusterScanProfile`) that defines the configuration for a CIS scan: the benchmark version to use and any specific tests to skip in that benchmark. This chart installs a few default `ClusterScanProfile` custom resources with no skipped tests, which can immediately be used to launch CIS scans.
- Benchmark Versions - A benchmark version is a custom resource (`ClusterScanBenchmark`) that defines the CIS benchmark version to run with kube-bench, as well as the valid configuration parameters for that benchmark. This chart installs a few default `ClusterScanBenchmark` custom resources.
- Alerting Resources - Rancher's CIS Benchmark application lets you run a cluster scan on a schedule and send alerts when scans finish.
- If you want alerts to be delivered when a cluster scan completes, ensure that Rancher's [Monitoring and Alerting](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/) application is pre-installed and that the [Receivers and Routes](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/#alertmanager-config) are configured to send out alerts.
- Additionally, set `alerts.enabled: true` in the values YAML while installing or upgrading this chart, as shown in the sketch below.
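For illustration, a scheduled scan with alerting enabled might look like the following sketch. The field names come from the `ClusterScan` CRD shipped in the companion CRD chart, and `cis-1.6-profile` is one of the default profiles installed here:
```
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: nightly-scan
spec:
  scanProfileName: cis-1.6-profile
  scheduledScanConfig:
    # Run every day at midnight; keep the three most recent reports.
    cronSchedule: "0 0 * * *"
    retentionCount: 3
    scanAlertRule:
      alertOnComplete: true
      alertOnFailure: true
```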

@@ -0,0 +1,23 @@
{{/* Ensure namespace is set the same everywhere */}}
{{- define "cis.namespace" -}}
{{- .Release.Namespace | default "cis-operator-system" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux_node_tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}

@@ -0,0 +1,14 @@
{{- if .Values.alerts.enabled -}}
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: rancher-cis-pod-monitor
namespace: {{ template "cis.namespace" . }}
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
podMetricsEndpoints:
- port: cismetrics
{{- end }}

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.5
spec:
clusterProvider: ""
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.6
spec:
clusterProvider: ""
minKubernetesVersion: "1.16.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: eks-1.0
spec:
clusterProvider: eks
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: gke-1.0
spec:
clusterProvider: gke
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.5-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.5-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.6-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.16.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.6-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.16.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.5-hardened
spec:
clusterProvider: rke2
minKubernetesVersion: "1.18.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.5-permissive
spec:
clusterProvider: rke2
minKubernetesVersion: "1.18.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.6-hardened
spec:
clusterProvider: rke2
minKubernetesVersion: "1.20.5"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.6-permissive
spec:
clusterProvider: rke2
minKubernetesVersion: "1.20.5"

@@ -0,0 +1,49 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-admin
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["create", "update", "delete", "patch","get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-view
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs: ["get", "watch", "list"]

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: default-clusterscanprofiles
namespace: {{ template "cis.namespace" . }}
data:
# Default ClusterScanProfiles per cluster provider type
rke: |-
<1.16.0: rke-profile-permissive-1.5
>=1.16.0: rke-profile-permissive-1.6
rke2: |-
<1.20.0: rke2-cis-1.5-profile-permissive
>=1.20.0: rke2-cis-1.6-profile-permissive
eks: "eks-profile"
gke: "gke-profile"
default: "cis-1.6-profile"

@@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: cis-operator
namespace: {{ template "cis.namespace" . }}
labels:
cis.cattle.io/operator: cis-operator
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
template:
metadata:
labels:
cis.cattle.io/operator: cis-operator
spec:
serviceAccountName: cis-operator-serviceaccount
containers:
- name: cis-operator
image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}'
imagePullPolicy: Always
ports:
- name: cismetrics
containerPort: {{ .Values.alerts.metricsPort }}
env:
- name: SECURITY_SCAN_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }}
- name: SECURITY_SCAN_IMAGE_TAG
value: {{ .Values.image.securityScan.tag }}
- name: SONOBUOY_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.sonobuoy.repository }}
- name: SONOBUOY_IMAGE_TAG
value: {{ .Values.image.sonobuoy.tag }}
- name: CIS_ALERTS_METRICS_PORT
value: '{{ .Values.alerts.metricsPort }}'
- name: CIS_ALERTS_SEVERITY
value: {{ .Values.alerts.severity }}
- name: CIS_ALERTS_ENABLED
value: {{ .Values.alerts.enabled | default "false" | quote }}
- name: CLUSTER_NAME
value: {{ .Values.global.cattle.clusterName }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- include "linux_node_tolerations" . | nindent 8}}
{{- with .Values.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ template "cis.namespace" . }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,20 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: cis-operator-serviceaccount
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ template "cis.namespace" . }}]
backoffLimit: 1
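# A quick way to verify that the hook ran (a sketch; assumes the chart's
# default cis-operator-system namespace):
#   kubectl get serviceaccount default -n cis-operator-system \
#     -o jsonpath='{.automountServiceAccountToken}'
# This should print "false" once the post-install/post-upgrade job succeeds.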

@@ -0,0 +1,43 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cis-operator-role
subjects:
- kind: ServiceAccount
name: cis-serviceaccount
namespace: {{ template "cis.namespace" . }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cis-operator-installer
subjects:
- kind: ServiceAccount
name: cis-operator-serviceaccount
namespace: {{ template "cis.namespace" . }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: cis-1.5-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: cis-1.5

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: cis-1.6-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: cis-1.6

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-hardened-1.5
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.5-hardened

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-permissive-1.5
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.5-permissive

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-hardened-1.6
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.6-hardened

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-permissive-1.6
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.6-permissive

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.5-profile-hardened
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.5-hardened

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.5-profile-permissive
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.5-permissive

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.6-profile-hardened
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.6-hardened

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.6-profile-permissive
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.6-permissive

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: eks-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: eks-1.0

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: gke-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: gke-1.0

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
name: cis-operator-serviceaccount
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-serviceaccount

@@ -0,0 +1,17 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "cis.cattle.io/v1/ClusterScan" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanBenchmark" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanProfile" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanReport" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}
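#{{/* Note: the leading "#" keeps each rendered line a YAML comment while Helm still evaluates the template actions above, so the install fails with the message in `required` whenever the CRD chart has not been installed first. */}}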

@@ -0,0 +1,45 @@
# Default values for rancher-cis-benchmark.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
cisoperator:
repository: rancher/cis-operator
tag: v1.0.4-rc1
securityScan:
repository: rancher/security-scan
tag: v0.2.3-rc1
sonobuoy:
repository: rancher/mirrored-sonobuoy-sonobuoy
tag: v0.16.3
resources: {}
# We usually recommend not specifying default resources and leaving this as a
# conscious choice for the user. This also increases the chance that the chart
# runs in environments with limited resources, such as Minikube. If you do want
# to specify resources, uncomment the following lines, adjust them as necessary,
# and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
global:
cattle:
systemDefaultRegistry: ""
clusterName: ""
kubectl:
repository: rancher/kubectl
tag: v1.20.2
alerts:
enabled: false
severity: warning
metricsPort: 8080
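# Example override (a sketch): enable alert delivery at install or upgrade
# time, assuming rancher-monitoring is already installed:
#   helm upgrade --install rancher-cis-benchmark . \
#     -n cis-operator-system --create-namespace \
#     --set alerts.enabled=true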

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/release-name: rancher-monitoring-crd
apiVersion: v1
description: Installs the CRDs for rancher-monitoring.
name: rancher-monitoring-crd
type: application
version: 14.5.100-rc01

@@ -0,0 +1,2 @@
# rancher-monitoring-crd
A Rancher chart that installs the CRDs used by rancher-monitoring.

@@ -0,0 +1,358 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: podmonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: PodMonitor
listKind: PodMonitorList
plural: podmonitors
singular: podmonitor
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: PodMonitor defines monitoring for a set of pods.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Pod selection for target discovery by Prometheus.
properties:
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: Selector to select which namespaces the Endpoints objects are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podMetricsEndpoints:
description: A list of endpoints allowed as part of this PodMonitor.
items:
description: PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics.
properties:
basicAuth:
description: 'BasicAuth allow an endpoint to authenticate over basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint'
properties:
password:
description: The secret in the service monitor namespace that contains the password for authentication.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
username:
description: The secret in the service monitor namespace that contains the username for authentication.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
bearerTokenSecret:
description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
honorLabels:
description: HonorLabels chooses the metric's labels on collisions with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
params:
additionalProperties:
items:
type: string
type: array
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the pod port this endpoint refers to. Mutually exclusive with targetPort.
type: string
proxyUrl:
description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: 'Deprecated: Use ''port'' instead.'
x-kubernetes-int-or-string: true
tlsConfig:
description: TLS configuration to use when scraping the endpoint.
properties:
ca:
description: Struct containing the CA cert to use for the targets.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
cert:
description: Struct containing the client cert file for the targets.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
type: array
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
format: int64
type: integer
selector:
description: Selector to select Pod objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
targetLimit:
description: TargetLimit defines a limit on the number of scraped targets that will be accepted.
format: int64
type: integer
required:
- podMetricsEndpoints
- selector
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@@ -0,0 +1,202 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: probes.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: Probe
listKind: ProbeList
plural: probes
singular: probe
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: Probe defines monitoring for a set of static targets or ingresses.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Ingress selection for target discovery by Prometheus.
properties:
interval:
description: Interval at which targets are probed using the configured prober. If not specified Prometheus' global scrape interval is used.
type: string
jobName:
description: The job name assigned to scraped metrics by default.
type: string
module:
description: 'The module to use for probing specifying how to probe the target. Example module configuring in the blackbox exporter: https://github.com/prometheus/blackbox_exporter/blob/master/example.yml'
type: string
prober:
description: Specification for the prober to use for probing targets. The prober.URL parameter is required. Targets cannot be probed if left empty.
properties:
path:
description: Path to collect metrics from. Defaults to `/probe`.
type: string
scheme:
description: HTTP scheme to use for scraping. Defaults to `http`.
type: string
url:
description: Mandatory URL of the prober.
type: string
required:
- url
type: object
scrapeTimeout:
description: Timeout for scraping metrics from the Prometheus exporter.
type: string
targets:
description: Targets defines a set of static and/or dynamically discovered targets to be probed using the prober.
properties:
ingress:
description: Ingress defines the set of dynamically discovered ingress objects which hosts are considered for probing.
properties:
namespaceSelector:
description: Select Ingress objects by namespace.
properties:
any:
description: Boolean describing whether all namespaces are selected in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
relabelingConfigs:
description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
selector:
description: Select Ingress objects by labels.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: object
staticConfig:
description: 'StaticConfig defines static targets which are considered for probing. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.'
properties:
labels:
additionalProperties:
type: string
description: Labels assigned to all metrics scraped from the targets.
type: object
relabelingConfigs:
description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
static:
description: Targets is a list of URLs to probe using the configured prober.
items:
type: string
type: array
type: object
type: object
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@@ -0,0 +1,90 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: prometheusrules.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: PrometheusRule
listKind: PrometheusRuleList
plural: prometheusrules
singular: prometheusrule
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: PrometheusRule defines recording and alerting rules for a Prometheus instance
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired alerting rule definitions for Prometheus.
properties:
groups:
description: Content of Prometheus rule file
items:
description: 'RuleGroup is a list of sequentially evaluated recording and alerting rules. Note: PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. Valid values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response'
properties:
interval:
type: string
name:
type: string
partial_response_strategy:
type: string
rules:
items:
description: Rule describes an alerting or recording rule.
properties:
alert:
type: string
annotations:
additionalProperties:
type: string
type: object
expr:
anyOf:
- type: integer
- type: string
x-kubernetes-int-or-string: true
for:
type: string
labels:
additionalProperties:
type: string
type: object
record:
type: string
required:
- expr
type: object
type: array
required:
- name
- rules
type: object
type: array
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@@ -0,0 +1,375 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: servicemonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: ServiceMonitor
listKind: ServiceMonitorList
plural: servicemonitors
singular: servicemonitor
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: ServiceMonitor defines monitoring for a set of services.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Service selection for target discovery by Prometheus.
properties:
endpoints:
description: A list of endpoints allowed as part of this ServiceMonitor.
items:
description: Endpoint defines a scrapeable endpoint serving Prometheus metrics.
properties:
basicAuth:
description: 'BasicAuth allow an endpoint to authenticate over basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
properties:
password:
description: The secret in the service monitor namespace that contains the password for authentication.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
username:
description: The secret in the service monitor namespace that contains the username for authentication.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
bearerTokenFile:
description: File to read bearer token for scraping targets.
type: string
bearerTokenSecret:
description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the service monitor and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
honorLabels:
description: HonorLabels chooses the metric's labels on collisions with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
params:
additionalProperties:
items:
type: string
type: array
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
type: string
proxyUrl:
description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before scraping. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching. Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port.
x-kubernetes-int-or-string: true
tlsConfig:
description: TLS configuration to use when scraping the endpoint
properties:
ca:
description: Struct containing the CA cert to use for the targets.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
caFile:
description: Path to the CA cert in the Prometheus container to use for the targets.
type: string
cert:
description: Struct containing the client cert file for the targets.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
type: object
certFile:
description: Path to the client cert file in the Prometheus container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the targets.
properties:
key:
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
type: array
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: Selector to select which namespaces the Endpoints objects are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
format: int64
type: integer
selector:
description: Selector to select Endpoints objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
targetLabels:
description: TargetLabels transfers labels on the Kubernetes Service onto the target.
items:
type: string
type: array
targetLimit:
description: TargetLimit defines a limit on the number of scraped targets that will be accepted.
format: int64
type: integer
required:
- endpoints
- selector
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
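For reference, a minimal sketch of a ServiceMonitor that exercises the TLS fields defined by the schema above; all names (`example-app`, `ca-bundle`, the `example` namespace) are hypothetical.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app              # hypothetical name
  namespace: cattle-monitoring-system
spec:
  jobLabel: app                  # take the job name from the Service's "app" label
  namespaceSelector:
    matchNames:
      - example                  # hypothetical namespace to discover Endpoints in
  selector:
    matchLabels:
      app: example-app
  endpoints:
    - port: https                # named port on the Service
      scheme: https
      scrapeTimeout: 10s
      tlsConfig:
        serverName: example-app.example.svc
        insecureSkipVerify: false
        ca:
          configMap:             # CA cert sourced from a ConfigMap, per the schema above
            name: ca-bundle      # hypothetical ConfigMap
            key: ca.crt
```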

View File

@ -0,0 +1,29 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
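As a quick illustration of how these helpers compose, a sketch of the rendered output, assuming `global.cattle.systemDefaultRegistry` is set to the hypothetical `registry.example.com` and the cluster runs Kubernetes 1.14 or newer:

```yaml
# image line rendered from: {{ template "system_default_registry" . }}rancher/kubectl:v1.20.2
image: registry.example.com/rancher/kubectl:v1.20.2
# "linux-node-selector" on Kubernetes >= 1.14:
nodeSelector:
  kubernetes.io/os: linux
# "linux-node-tolerations":
tolerations:
  - key: "cattle.io/os"
    value: "linux"
    effect: "NoSchedule"
    operator: "Equal"
```

If the registry is unset, the helper renders an empty string and the image falls back to `rancher/kubectl:v1.20.2`.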

View File

@ -0,0 +1,96 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-create
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": post-install, post-upgrade, post-rollback
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: {{ .Chart.Name }}-create
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: true
runAsUser: 1000
containers:
- name: create-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- apply
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-delete
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: {{ .Chart.Name }}-delete
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: true
runAsUser: 1000
initContainers:
- name: remove-finalizers
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- apply
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
containers:
- name: delete-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- delete
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest
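Because both Jobs are driven by Helm hooks with a `hook-succeeded` delete policy, they run outside the normal release lifecycle and clean themselves up after success. A hedged sketch of the flow (the chart path, release name, and namespace are assumptions):

```console
# post-install / post-upgrade / post-rollback: the -create Job applies the CRD manifest
helm install rancher-monitoring-crd . -n cattle-monitoring-system
kubectl get crds | grep monitoring.coreos.com

# pre-delete: the -delete Job removes the CRDs before the release is removed
helm uninstall rancher-monitoring-crd -n cattle-monitoring-system
```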

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Chart.Name }}-manifest
namespace: {{ .Release.Namespace }}
data:
crd-manifest.yaml: |
{{- $currentScope := . -}}
{{- $crds := (.Files.Glob "crd-manifest/**.yaml") -}}
{{- range $path, $_ := $crds -}}
{{- with $currentScope -}}
{{ .Files.Get $path | nindent 4 }}
---
{{- end -}}{{- end -}}
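To make the glob-and-concatenate logic concrete, a sketch of the rendered ConfigMap, assuming the chart ships a single file `crd-manifest/crd-example.yaml` (the file name and namespace are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: rancher-monitoring-crd-manifest
  namespace: cattle-monitoring-system
data:
  crd-manifest.yaml: |
    # contents of crd-manifest/crd-example.yaml, indented by nindent 4
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    # ...
    ---
```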

View File

@ -0,0 +1,72 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs: ['create', 'get', 'patch', 'delete']
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ .Chart.Name }}-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Chart.Name }}-manager
subjects:
- kind: ServiceAccount
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'configMap'
- 'secret'

View File

@ -0,0 +1,11 @@
# Default values for rancher-monitoring-crd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
systemDefaultRegistry: ""
image:
repository: rancher/kubectl
tag: v1.20.2

View File

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# helm/charts
OWNERS
hack/
ci/
kube-prometheus-*.tgz

View File

@ -0,0 +1,47 @@
# Changelog
All notable changes from the upstream Prometheus Operator chart will be added to this file.
## [Package Version 00] - 2020-07-19
### Added
- Added [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) as a dependency to the upstream Prometheus Operator chart to allow users to expose custom metrics from the default Prometheus instance deployed by this chart
- Removed `prometheus-operator/cleanup-crds.yaml` and `prometheus-operator/crds.yaml` from the Prometheus Operator upstream chart in favor of just using the CRD directory to install the CRDs.
- Added support for `rkeControllerManager`, `rkeScheduler`, `rkeProxy`, and `rkeEtcd` PushProx exporters for monitoring k8s components within RKE clusters
- Added support for a `k3sServer` PushProx exporter that monitors k3s server components (`kubeControllerManager`, `kubeScheduler`, and `kubeProxy`) within k3s clusters
- Added support for `kubeAdmControllerManager`, `kubeAdmScheduler`, `kubeAdmProxy`, and `kubeAdmEtcd` PushProx exporters for monitoring k8s components within kubeAdm clusters
- Added support for `rke2ControllerManager`, `rke2Scheduler`, `rke2Proxy`, and `rke2Etcd` PushProx exporters for monitoring k8s components within rke2 clusters
- Exposed `prometheus.prometheusSpec.ignoreNamespaceSelectors` in values.yaml and set it to `false` by default. This value instructs the default Prometheus server deployed with this chart to ignore the `namespaceSelector` field within any ServiceMonitor or PodMonitor CRs that it selects, which prevents ServiceMonitors and PodMonitors from configuring the Prometheus scrape configuration to monitor resources outside the namespace they are deployed in (see the sketch after this list). If a user needs one ServiceMonitor / PodMonitor to monitor resources within several namespaces (such as the resources used to monitor Istio in a default installation), they should not enable this option, since it would require one ServiceMonitor / PodMonitor CR per namespace to be monitored. Relevant fields were also updated in the default README.md.
- Added `grafana.sidecar.dashboards.searchNamespace` to `values.yaml` with a default value of `cattle-dashboards`. The namespace provided should contain all ConfigMaps with the label `grafana_dashboard` and will be searched by the Grafana Dashboards sidecar for updates. The namespace specified is also created along with this deployment. All default dashboard ConfigMaps have been relocated from the deployment namespace to the namespace specified
- Added `monitoring-admin`, `monitoring-edit`, and `monitoring-view` default `ClusterRoles` to allow admins to assign roles to users to interact with Prometheus Operator CRs. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `ClusterRoleBinding` to bind these roles to a Subject to allow them to set up or view `ServiceMonitors` / `PodMonitors` / `PrometheusRules` and view `Prometheus` or `Alertmanager` CRs across the cluster. If `.Values.global.rbac.userRoles.aggregateRolesForRBAC` is enabled, these ClusterRoles will aggregate into the respective default ClusterRoles provided by Kubernetes
- Added `monitoring-config-admin`, `monitoring-config-edit` and `monitoring-config-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `Secrets` and `ConfigMaps` within the `cattle-monitoring-system` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-monitoring-system` namespace to allow them to modify Secrets / ConfigMaps tied to the deployment, such as your Alertmanager Config Secret.
- Added `monitoring-dashboard-admin`, `monitoring-dashboard-edit` and `monitoring-dashboard-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `ConfigMaps` within the `cattle-dashboards` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`) and deploying Grafana as part of this chart. In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-dashboards` namespace to allow them to create / modify ConfigMaps that contain the JSON used to persist Grafana Dashboards on the cluster.
- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter`
- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`.
- Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart.
- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. The purpose of this container is to make it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is also where the Grafana service will now point to, and will forward all requests to the Grafana container listening on the default port `3000`.
- Added a default `nginx` proxy container deployed with Prometheus whose config is set in the `ConfigMap` located in `templates/prometheus/nginx-config.yaml`. The purpose of this container is to make it possible to view Prometheus's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8081` (with a `portName` of `nginx-http` instead of the default `web`), which is also where the Prometheus service will now point to, and will forward all requests to the Prometheus container listening on the default port `9090`.
- Added support for passing CIS Scans in a hardened cluster by introducing a Job that patches the default service account within the `cattle-monitoring-system` and `cattle-dashboards` namespaces on install or upgrade and adding a default allow all `NetworkPolicy` to the `cattle-monitoring-system` and `cattle-dashboards` namespaces.
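A hedged sketch of the `ignoreNamespaceSelectors` behavior described above: when the option is enabled, a ServiceMonitor like the following (all names are hypothetical) only discovers targets in its own namespace, because its `namespaceSelector` is ignored.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: istio-components    # hypothetical name
  namespace: istio-system   # hypothetical namespace
spec:
  namespaceSelector:
    any: true               # ignored when ignoreNamespaceSelectors=true
  selector:
    matchLabels:
      app: istiod           # hypothetical label
  endpoints:
    - port: http-monitoring # hypothetical port name
```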
### Modified
- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml`
- Modified the default `node-exporter` port from `9100` to `9796`
- Modified the default `nameOverride` to `rancher-monitoring`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Modified the default `namespaceOverride` to `cattle-monitoring-system`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Configured some default values for `grafana.service` values and exposed them in the default README.md
- The default namespaces of the following ServiceMonitors were changed from the deployment namespace so that they can continue to monitor metrics when `prometheus.prometheusSpec.ignoreNamespaceSelectors` is enabled:
- `core-dns`: `kube-system`
- `api-server`: `default`
- `kube-controller-manager`: `kube-system`
- `kubelet`: `{{ .Values.kubelet.namespace }}`
- Disabled the following deployments by default (can be enabled if required):
- `AlertManager`
- `kube-controller-manager` metrics exporter
- `kube-etcd` metrics exporter
- `kube-scheduler` metrics exporter
- `kube-proxy` metrics exporter
- Updated default Grafana `deploymentStrategy` to `Recreate` to prevent deployments from being stuck on upgrade if a PV is attached to Grafana
- Changed the default of `<serviceMonitor|podMonitor|rule>SelectorNilUsesHelmValues` to `false`. As a result, we look for all CRs with any labels in all namespaces by default, rather than just the ones tagged with the label `release: rancher-monitoring`.
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. The benefit of this approach is that changes made to the Config Secret on a live cluster will never be overridden on a `helm upgrade`, since the secret is only created on a `helm install`. If you would like the secret to be cleaned up on a `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; this is disabled by default to prevent the loss of alerting configuration on an uninstall. This secret will never be modified on a `helm upgrade`.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": "true", "runAsUser": "1000"}` and replaced `grafana.rbac.pspUseAppArmor` in favor of `grafana.rbac.pspAnnotations={}` in order to make it possible to deploy this chart on a hardened cluster which does not support Seccomp or AppArmor annotations in PSPs. Users can always choose to specify the annotations they want to use for the PSP directly as part of the values provided.
- Modified `.Values.prometheus.prometheusSpec.containers` to take in a string representing a template that should be rendered by Helm (via `tpl`) instead of allowing a user to provide YAML directly.
- Modified the default Grafana configuration to auto assign users who access Grafana to the Viewer role and enable anonymous access to Grafana dashboards by default. This default works well for a Rancher user who is accessing Grafana via the `kubectl proxy` on the Rancher Dashboard UI since anonymous users who enter via the proxy are authenticated by the k8s API Server, but you can / should modify this behavior if you plan on exposing Grafana in a way that does not require authentication (e.g. as a `NodePort` service).
- Modified the default Grafana configuration to add a default dashboard for Rancher on the Grafana home page.

View File

@ -0,0 +1,12 @@
# Contributing Guidelines
## How to contribute to this chart
1. Fork this repository, develop and test your Chart.
1. Bump the chart version for every change.
1. Ensure PR title has the prefix `[kube-prometheus-stack]`
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
1. The `hack/minikube` folder has scripts to set up minikube and the components of this chart so that all components can be scraped. You can use this configuration when validating your changes.
1. Check for changes of RBAC rules.
1. Check for changes in CRD specs.
1. PR must pass the linter (`helm lint`)

View File

@ -0,0 +1,103 @@
annotations:
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
- name: Upstream Project
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
catalog.cattle.io/auto-install: rancher-monitoring-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Monitoring
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1
catalog.cattle.io/release-name: rancher-monitoring
catalog.cattle.io/requests-cpu: 4500m
catalog.cattle.io/requests-memory: 4000Mi
catalog.cattle.io/ui-component: monitoring
apiVersion: v2
appVersion: 0.46.0
dependencies:
- condition: grafana.enabled
name: grafana
repository: file://./charts/grafana
- condition: k3sServer.enabled
name: k3sServer
repository: file://./charts/k3sServer
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: file://./charts/kube-state-metrics
- condition: kubeAdmControllerManager.enabled
name: kubeAdmControllerManager
repository: file://./charts/kubeAdmControllerManager
- condition: kubeAdmEtcd.enabled
name: kubeAdmEtcd
repository: file://./charts/kubeAdmEtcd
- condition: kubeAdmProxy.enabled
name: kubeAdmProxy
repository: file://./charts/kubeAdmProxy
- condition: kubeAdmScheduler.enabled
name: kubeAdmScheduler
repository: file://./charts/kubeAdmScheduler
- condition: prometheus-adapter.enabled
name: prometheus-adapter
repository: file://./charts/prometheus-adapter
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: file://./charts/prometheus-node-exporter
- condition: rke2ControllerManager.enabled
name: rke2ControllerManager
repository: file://./charts/rke2ControllerManager
- condition: rke2Etcd.enabled
name: rke2Etcd
repository: file://./charts/rke2Etcd
- condition: rke2Proxy.enabled
name: rke2Proxy
repository: file://./charts/rke2Proxy
- condition: rke2Scheduler.enabled
name: rke2Scheduler
repository: file://./charts/rke2Scheduler
- condition: rkeControllerManager.enabled
name: rkeControllerManager
repository: file://./charts/rkeControllerManager
- condition: rkeEtcd.enabled
name: rkeEtcd
repository: file://./charts/rkeEtcd
- condition: rkeProxy.enabled
name: rkeProxy
repository: file://./charts/rkeProxy
- condition: rkeScheduler.enabled
name: rkeScheduler
repository: file://./charts/rkeScheduler
- condition: windowsExporter.enabled
name: windowsExporter
repository: file://./charts/windowsExporter
description: Collects several related Helm charts, Grafana dashboards, and Prometheus
rules combined with documentation and scripts to provide easy to operate end-to-end
Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
home: https://github.com/prometheus-operator/kube-prometheus
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
- operator
- prometheus
- kube-prometheus
- monitoring
kubeVersion: '>=1.16.0-0'
maintainers:
- name: vsliouniaev
- name: bismarck
- email: gianrubio@gmail.com
name: gianrubio
- email: github.gkarthiks@gmail.com
name: gkarthiks
- email: scott@r6by.com
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: arvind.iyengar@suse.com
name: Arvind
name: rancher-monitoring
sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 14.5.100-rc01

View File

@ -0,0 +1,455 @@
# kube-prometheus-stack
Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts.
_Note: This chart was formerly named `prometheus-operator`; it was renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._
## Prerequisites
- Kubernetes 1.16+
- Helm 3+
## Get Repo Info
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm
$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Dependencies
By default this chart installs additional, dependent charts:
- [kubernetes/kube-state-metrics](https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics)
- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
To disable dependencies during installation, see [multiple releases](#multiple-releases) below.
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
## Uninstall Chart
```console
# Helm
$ helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
CRDs created by this chart are not removed by default and should be manually cleaned up:
```console
kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
kubectl delete crd alertmanagers.monitoring.coreos.com
kubectl delete crd podmonitors.monitoring.coreos.com
kubectl delete crd probes.monitoring.coreos.com
kubectl delete crd prometheuses.monitoring.coreos.com
kubectl delete crd prometheusrules.monitoring.coreos.com
kubectl delete crd servicemonitors.monitoring.coreos.com
kubectl delete crd thanosrulers.monitoring.coreos.com
```
## Upgrading Chart
```console
# Helm
$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
With Helm v3, CRDs created by this chart are not updated by default and should be manually updated.
Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions).
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 13.x to 14.x
Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 12.x to 13.x
Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
```
### From 11.x to 12.x
The chart was migrated to support only helm v3 and later.
### From 10.x to 11.x
Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
```
Version 11 removes the deprecated tlsProxy via ghostunnel in favor of the native TLS support that prometheus-operator gained with v0.39.0.
### From 9.x to 10.x
Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
```
### From 8.x to 9.x
Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration.
### From 7.x to 8.x
Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x:
```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0
```
Then upgrade to 8.x.x
```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x]
```
Minimal recommended Prometheus version for this chart release is `2.12.x`
### From 6.x to 7.x
Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0.
### From 5.x to 6.x
Due to a change in the deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. If this is not done, an error will occur indicating that the deployment cannot be modified:
```console
invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
```
If this error has already been encountered, use `helm history` to determine which release worked, `helm rollback` to that release, and then `helm upgrade --force` to the new one.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
```console
helm show values prometheus-community/kube-prometheus-stack
```
You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
### Rancher Monitoring Configuration
The following table shows values exposed by Rancher Monitoring's additions to the chart:
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `nameOverride` | Provide a name that should be used instead of the chart name when naming all resources deployed by this chart |`"rancher-monitoring"`|
| `namespaceOverride` | Override the deployment namespace | `"cattle-monitoring-system"` |
| `global.rbac.userRoles.create` | Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets | `true` |
| `global.rbac.userRoles.aggregateToDefaultRoles` | Aggregate default user ClusterRoles into default k8s ClusterRoles | `true` |
| `prometheus-adapter.enabled` | Whether to install [prometheus-adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) within the cluster | `true` |
| `prometheus-adapter.prometheus.url` | A URL pointing to the Prometheus deployment within your cluster. The default value is set based on the assumption that you plan to deploy the default Prometheus instance from this chart where `.Values.namespaceOverride=cattle-monitoring-system` and `.Values.nameOverride=rancher-monitoring` | `http://rancher-monitoring-prometheus.cattle-monitoring-system.svc` |
| `prometheus-adapter.prometheus.port` | The port on the Prometheus deployment that Prometheus Adapter can make requests to | `9090` |
| `prometheus.prometheusSpec.ignoreNamespaceSelectors` | Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs. If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into | `false` |
| `alertmanager.secret.cleanupOnUninstall` | Whether or not to trigger a job to clean up the alertmanager config secret to be deleted on a `helm uninstall`. By default, this is disabled to prevent the loss of alerting configuration on an uninstall. | `false` |
| `alertmanager.secret.image.pullPolicy` | Image pull policy for job(s) related to alertmanager config secret's lifecycle | `IfNotPresent` |
| `alertmanager.secret.image.repository` | Repository to use for job(s) related to alertmanager config secret's lifecycle | `rancher/rancher-agent` |
| `alertmanager.secret.image.tag` | Tag to use for job(s) related to alertmanager config secret's lifecycle | `v2.4.8` |
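For instance, if `nameOverride` or `namespaceOverride` are changed, the Prometheus Adapter URL must track them; a minimal values sketch (the `my-monitoring` names are illustrative):

```yaml
nameOverride: my-monitoring
namespaceOverride: my-monitoring-system
prometheus-adapter:
  prometheus:
    # follows the pattern http://<nameOverride>-prometheus.<namespaceOverride>.svc
    url: http://my-monitoring-prometheus.my-monitoring-system.svc
    port: 9090
```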
The following values are enabled for different distributions via [rancher-pushprox](https://github.com/rancher/dev-charts/tree/master/packages/rancher-pushprox). See the rancher-pushprox `README.md` for more information on all of the values that can be configured for the PushProx chart.
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` |
| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` |
| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` |
| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` |
| `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` |
| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` |
| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` |
| `kubeAdmProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in kubeAdm clusters | `false` |
| `kubeAdmEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in kubeAdm clusters | `false` |
### Multiple releases
The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
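For example, a sketch of an install that disables those dependencies (the release name is a placeholder):

```console
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack \
  --set kubeStateMetrics.enabled=false \
  --set nodeExporter.enabled=false \
  --set grafana.enabled=false
```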
## Work-Arounds for Known Issues
### Running on private GKE clusters
When Google configures the control plane for private clusters, it automatically configures VPC peering between your Kubernetes cluster's network and a separate Google-managed project. In order to restrict what Google is able to access within your cluster, the configured firewall rules restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules)
Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
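For example (a sketch; the release name is a placeholder):

```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack \
  --set prometheusOperator.admissionWebhooks.enabled=false
```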
## PrometheusRules Admission Webhooks
With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
### How the Chart Configures the Hooks
A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits.
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process allows a custom CA, provisioned by some other process, to be patched into the webhook configurations as well. The chosen failure policy is also patched into the webhook configurations.
### Alternatives
It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to true.
### Limitations
Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
## Developing Prometheus Rules and Grafana Dashboards
The Grafana dashboards and Prometheus rules in this chart are a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by the scripts in the [hack](hack) folder. To introduce any changes, you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here with those scripts.
## Further Information
For more in-depth documentation of configuration options meanings, please see
- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
- [Prometheus](https://prometheus.io/docs/introduction/overview/)
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
## prometheus.io/scrape
The prometheus operator does not support annotation-based discovery of services, using the `PodMonitor` or `ServiceMonitor` CRD in its place as they provide far more configuration options.
For information on how to use PodMonitors/ServiceMonitors, please see the documentation on the `prometheus-operator/prometheus-operator` documentation here:
- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-servicemonitors)
- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-podmonitors)
- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)
By default, Prometheus discovers the PodMonitors and ServiceMonitors within its namespace that are labeled with the same release tag as the prometheus-operator release.
Sometimes you may need to discover custom PodMonitors/ServiceMonitors, for example ones used to scrape data from third-party applications.
An easy way of doing this, without compromising the default PodMonitor/ServiceMonitor discovery, is to allow Prometheus to discover all PodMonitors/ServiceMonitors within its namespace without applying any label filtering.
To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
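In values form, that is (a minimal sketch):

```yaml
prometheus:
  prometheusSpec:
    podMonitorSelectorNilUsesHelmValues: false
    serviceMonitorSelectorNilUsesHelmValues: false
```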
## Migrating from stable/prometheus-operator chart
## Zero downtime
Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
However, the old name prefix needs to be kept. If you want the new name, follow the step-by-step guide below (with downtime).
You can override the name to achieve this:
```console
helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
```
**Note**: It is recommended to run this first with `--dry-run --debug`.
## Redeploy with new name (downtime)
If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration:
> The guide presumes that the chart is deployed in the `monitoring` namespace and the deployments are running there. If it is deployed in another namespace, replace `monitoring` with that namespace.
1. Patch the PersistentVolume created/used by the prometheus-operator chart to the `Retain` claim policy:
```console
kubectl patch pv/<PersistentVolume name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```
**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
2. Uninstall the **prometheus-operator** release, delete the existing PersistentVolumeClaim, and verify that the PV becomes Released.
```console
helm uninstall prometheus-operator -n monitoring
kubectl delete pvc/<PersistenceVolumeClaim name> -n monitoring
```
Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.
```console
kubectl delete service/prometheus-operator-kubelet -n kube-system
```
You can choose to remove all of your existing CRDs (ServiceMonitors, PodMonitors, etc.) if you want to.
3. Remove current `spec.claimRef` values to change the PV's status from Released to Available.
```console
kubectl patch pv/<PersistentVolume name> --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
```
**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
After these steps, proceed with a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in `values.yaml`.
The binding is done by matching the specific amount of storage requested and the access modes.
For example, if you had storage specified as this with **prometheus-operator**:
```yaml
volumeClaimTemplate:
spec:
storageClassName: gp2
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
```
You have to specify matching `volumeClaimTemplate` with 50Gi storage and `ReadWriteOnce` access mode.
Additionally, you should check the current AZ of your legacy installation's PV and configure the fresh release to use the same AZ. If the pods are in a different AZ than the PV, the release will fail to bind to the existing volume and will create a new PV instead.
This can be achieved either by specifying the labels through `values.yaml`, e.g. setting `prometheus.prometheusSpec.nodeSelector` to:
```yaml
nodeSelector:
failure-domain.beta.kubernetes.io/zone: east-west-1a
```
or passing these values as `--set` overrides during installation.
The new release should now re-attach your previously released PV with its content.
## Migrating from coreos/prometheus-operator chart
The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, and grafana, as well as the multitude of exporters necessary to monitor a cluster.
There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy.
You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
### High-level overview of Changes
#### Added dependencies
The chart has added 3 [dependencies](#dependencies).
- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components
- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md)
#### Kubelet Service
Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
#### Persistent Volumes
If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pvc-prometheus-migration-prometheus-0
spec:
accessModes:
- ReadWriteOnce
azureDisk:
cachingMode: None
diskName: pvc-prometheus-migration-prometheus-0
diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
fsType: ""
kind: Managed
readOnly: false
capacity:
storage: 1Gi
persistentVolumeReclaimPolicy: Delete
storageClassName: prometheus
volumeMode: Filesystem
```
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: prometheus
prometheus: prometheus-migration-prometheus
name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
namespace: monitoring
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: prometheus
volumeMode: Filesystem
volumeName: pvc-prometheus-migration-prometheus-0
```
The PVC will take ownership of the PV, and when you create a release using a persistent volume claim template, it will use the existing PVCs since they match the naming convention used by the chart. For other cloud providers, similar approaches can be used.
#### KubeProxy
The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which Prometheus instances **cannot** access. To collect these metrics, expose them by changing the `metricsBindAddress` field value to `0.0.0.0:10249`.
Depending on the cluster, the relevant `config.conf` data will be in the ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example:
```console
kubectl -n kube-system edit cm kube-proxy
```
```yaml
apiVersion: v1
data:
config.conf: |-
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# ...
# metricsBindAddress: 127.0.0.1:10249
metricsBindAddress: 0.0.0.0:10249
# ...
kubeconfig.conf: |-
# ...
kind: ConfigMap
metadata:
labels:
app: kube-proxy
name: kube-proxy
namespace: kube-system
```

View File

@ -0,0 +1,15 @@
# Rancher Monitoring and Alerting
This chart is based on the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) chart. The chart deploys [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) and its CRDs along with [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana), [Prometheus Adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter) and additional charts / Kubernetes manifests to gather metrics. It allows users to monitor their Kubernetes clusters, view metrics in Grafana dashboards, and set up alerts and notifications.
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/).
The chart installs the following components:
- [Prometheus Operator](https://github.com/coreos/prometheus-operator) - The operator provides easy monitoring definitions for Kubernetes services, manages [Prometheus](https://prometheus.io/) and [AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/) instances, and adds default scrape targets for some Kubernetes components.
- [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) - A collection of community-curated Kubernetes manifests, Grafana Dashboards, and PrometheusRules that deploy a default end-to-end cluster monitoring configuration.
- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) - Grafana allows a user to create / view dashboards based on the cluster metrics collected by Prometheus.
- [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) / [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) / [rancher-pushprox](https://github.com/rancher/charts/tree/dev-v2.5/packages/rancher-pushprox/charts) - These charts monitor various Kubernetes components across different Kubernetes cluster types.
- [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) - The adapter allows a user to expose custom metrics, resource metrics, and external metrics on the default [Prometheus](https://prometheus.io/) instance to the Kubernetes API Server.
For more information, review the Helm README of this chart.

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.vscode
.project
.idea/
*.tmproj
OWNERS

View File

@ -0,0 +1,28 @@
annotations:
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-grafana
apiVersion: v2
appVersion: 7.4.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
kubeVersion: ^1.8.0-0
maintainers:
- email: zanhsieh@gmail.com
name: zanhsieh
- email: rluckie@cisco.com
name: rtluckie
- email: maor.friedman@redhat.com
name: maorfr
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: mail@torstenwalter.de
name: torstenwalter
name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.6.4

View File

@ -0,0 +1,514 @@
# Grafana Helm Chart
* Installs the web dashboarding system [Grafana](http://grafana.org/)
## Get Repo Info
```console
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release grafana/grafana
```
## Uninstalling the Chart
To uninstall/delete the my-release deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### To 4.0.0 (And 3.12.1)
This version requires Helm >= 2.12.0.
### To 5.0.0
You have to add --force to your helm upgrade command as the labels of the chart have changed.
### To 6.0.0
This version requires Helm >= 3.1.0.
## Configuration
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag (`Must be >= 5.0.0`) | `7.4.5` |
| `image.sha` | Image sha (optional) | `2b56f6106ddc376bb46d974230d530754bf65a640dfbc5245191d72d3b49efc6` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `{}` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.portName` | Name of the port on the service | `service` |
| `service.targetPort` | Internal port to which the service routes | `3000` |
| `service.nodePort` | Kubernetes service nodePort | `nil` |
| `service.annotations` | Service annotations | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.clusterIP` | internal cluster service IP | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
| `service.externalIPs` | service external IP addresses | `[]` |
| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress accepted path | `/` |
| `ingress.pathType` | Ingress type of path | `Prefix` |
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` |
| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
| `extraLabels` | Custom labels for all manifests | `{}` |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName`            | StorageClass of the persistent volume claim    | `nil`                                                     |
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
| `initChownData.enabled`                   | If false, don't reset data ownership at startup | `true`                                                   |
| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` |
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `envRenderSecret`                         | Sensitive environment variables passed to pods and stored as a secret | `{}`                               |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
| `notifiers` | Configure grafana notifiers | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.enabled` | Enable LDAP authentication | `false` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
| `ldap.config` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `labels` | Deployment labels | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Pod labels | `{}` |
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `1.10.7` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
| `sidecar.dashboards.provider.type` | Provider type | `file` |
| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
| `sidecar.dashboards.watchMethod`          | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` |
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` |
| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.annotations` | ServiceAccount annotations | |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` |
| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` |
| `rbac.create` | Create and use RBAC resources | `true` |
| `rbac.namespaced`                         | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance | `false` |
| `rbac.useExistingRole`                    | Set to the name of an existing role to skip role creation, while still creating the ServiceAccount and a RoleBinding to the role named here. | `nil` |
| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
| `command` | Define command to be executed by grafana container at startup | `nil` |
| `testFramework.enabled` | Whether to create test-related resources | `true` |
| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` |
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` |
| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` |
| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
| `serviceMonitor.path` | Path to scrape | `/metrics` |
| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` |
| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` |
| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
| `imageRenderer.service.portName` | image-renderer service port name | `'http'` |
| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` |
| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
| `imageRenderer.resources`                 | Set resource limits for image-renderer pods    | `{}`                                                      |
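Any of these parameters can be overridden at install or upgrade time with `--set`, or collected in a YAML file passed via `-f`. A minimal sketch (the release name `my-release` and the chart path `./grafana` are placeholders):
```console
helm install my-release ./grafana \
  --set persistence.enabled=true \
  --set service.type=NodePort
```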
### Example ingress with path
With Grafana 6.3 and above:
```yaml
grafana.ini:
server:
domain: monitoring.example.com
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
hosts:
- "monitoring.example.com"
path: "/grafana"
```
### Example of extraVolumeMounts
```yaml
extraVolumeMounts:
- name: plugins
mountPath: /var/lib/grafana/plugins
subPath: configs/grafana/plugins
existingClaim: existing-grafana-claim
readOnly: false
```
## Import dashboards
There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
```yaml
dashboards:
default:
some-dashboard:
json: |
{
"annotations":
...
# Complete json file here
...
"title": "Some Dashboard",
"uid": "abcd1234",
"version": 1
}
custom-dashboard:
# This is a path to a file inside the dashboards directory inside the chart directory
file: dashboards/custom-dashboard.json
prometheus-stats:
# Ref: https://grafana.com/dashboards/2
gnetId: 2
revision: 2
datasource: Prometheus
local-dashboard:
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
```
## BASE64 dashboards
Dashboards can be stored on a server that does not return JSON directly but instead returns a Base64 encoded file (e.g. Gerrit).
A parameter has been added to the `url` use case: if you set `b64content` to `true` alongside the `url` entry, a Base64 decoding is applied before the file is saved to disk.
If this entry is not set, or is set to `false`, no decoding is applied to the file before saving it to disk.
### Gerrit use case
Gerrit's API for downloading files has the following schema: <https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content>, where {project-name} and
{file-id} usually contain '/' in their values, so those slashes MUST be replaced by %2F. Hence, if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>.
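Putting this together, a `dashboards` entry for the Gerrit case might look like the following sketch (the URL is the example above; the dashboard name is illustrative, and `b64content: true` enables the decoding described earlier):
```yaml
dashboards:
  default:
    gerrit-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      b64content: true
```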
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
dashboards are deleted/updated.
A recommendation is to use one configmap per dashboard, as removing dashboards from a configmap that
contains several of them is currently not properly mirrored in grafana.
Example dashboard config:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-dashboard
labels:
grafana_dashboard: "1"
data:
k8s-dashboard.json: |-
[...]
```
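On the chart side, the matching values to enable this sidecar are simply (a sketch; `grafana_dashboard` is the default label from the configuration table above):
```yaml
sidecar:
  dashboards:
    enabled: true
    label: grafana_dashboard
```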
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported.
Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```yaml
datasources:
datasources.yaml:
apiVersion: 1
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://localhost:8080
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault:
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.1"
tlsAuth: true
tlsAuthWithCACert: true
# <string> json object of data that will be encrypted.
secureJsonData:
tlsCACert: "..."
tlsClientCert: "..."
tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
```
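To ship the same provisioning YAML through the sidecar instead, set `sidecar.datasources.enabled: true` and wrap it in a Secret carrying the `sidecar.datasources.label` (default `grafana_datasource`). A minimal sketch (the Secret name and data key are illustrative):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: sample-grafana-datasource
  labels:
    grafana_datasource: "1"
type: Opaque
stringData:
  datasource.yaml: |-
    apiVersion: 1
    datasources:
      - name: Graphite
        type: graphite
        access: proxy
        url: http://localhost:8080
```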
## Sidecar for notifiers
If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the notification channels in grafana can be imported. The secrets must be created before
`helm install` so that the notifiers init container can list the secrets.
Secrets are recommended over configmaps for this use case because alert notification channels usually contain
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example notifiers config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
```yaml
notifiers:
- name: notification-channel-1
type: slack
uid: notifier1
# either
org_id: 2
# or
org_name: Main Org.
is_default: true
send_reminder: true
frequency: 1h
disable_resolve_message: false
# See `Supported Settings` section for settings supporter for each
# alert notification type.
settings:
recipient: 'XXX'
token: 'xoxb'
uploadImage: true
url: https://slack.com
delete_notifiers:
- name: notification-channel-1
uid: notifier1
org_id: 2
- name: notification-channel-2
# default org_id: 1
```
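As with datasources, the notifiers sidecar matches on `sidecar.notifiers.label` (default `grafana_notifier`), so the same config can be shipped in a Secret created before `helm install`. A sketch (the Secret name and data key are illustrative):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: sample-grafana-notifier
  labels:
    grafana_notifier: "1"
type: Opaque
stringData:
  notifiers.yaml: |-
    notifiers:
      - name: notification-channel-1
        type: slack
        uid: notifier1
        org_id: 2
        is_default: true
        settings:
          url: https://slack.com
```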
## How to serve Grafana with a path prefix (/grafana)
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
```yaml
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
path: /grafana/?(.*)
hosts:
- k8s.example.dev
grafana.ini:
server:
root_url: http://localhost:3000/grafana # this host can be localhost
```
## How to securely reference secrets in grafana.ini
This example uses Grafana's [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets.
In values.yaml, under `grafana.ini`:
```yaml
grafana.ini:
  auth.generic_oauth:
    enabled: true
    client_id: $__file{/etc/secrets/auth_generic_oauth/client_id}
    client_secret: $__file{/etc/secrets/auth_generic_oauth/client_secret}
```
Existing secret, or created along with helm:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: auth-generic-oauth-secret
type: Opaque
stringData:
client_id: <value>
client_secret: <value>
```
Include in the `extraSecretMounts` configuration flag:
```yaml
extraSecretMounts:
- name: auth-generic-oauth-secret-mount
secretName: auth-generic-oauth-secret
defaultMode: 0440
mountPath: /etc/secrets/auth_generic_oauth
readOnly: true
```
### extraSecretMounts using a Container Storage Interface (CSI) provider
This example uses a CSI driver, e.g. retrieving secrets using the [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure):
```yaml
extraSecretMounts:
- name: secrets-store-inline
mountPath: /run/secrets
readOnly: true
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: "my-provider"
nodePublishSecretRef:
name: akv-creds
```
## Image Renderer Plug-In
This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md)
```yaml
imageRenderer:
enabled: true
```
### Image Renderer NetworkPolicy
By default, the image-renderer pods have a NetworkPolicy which only allows ingress traffic from the created grafana instance.
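Both directions are controlled through values (see the configuration table above): `imageRenderer.networkPolicy.limitIngress` defaults to `true`, `limitEgress` to `false`. A sketch that additionally restricts the renderer's egress to grafana (plus DNS):
```yaml
imageRenderer:
  enabled: true
  networkPolicy:
    limitIngress: true
    limitEgress: true
```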
View File
@ -0,0 +1,54 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local
{{ if .Values.ingress.enabled }}
If you bind grafana to port 80, please update the values in values.yaml and reinstall:
```
securityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
command:
- "setcap"
- "'cap_net_bind_service=+ep'"
- "/usr/sbin/grafana-server &&"
- "sh"
- "/run.sh"
```
For details, refer to https://grafana.com/docs/installation/configuration/#http-port;
otherwise grafana will crash on startup.
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
http://{{ . }}
{{- end }}
{{ else }}
Get the Grafana URL to visit by running these commands in the same shell:
{{ if contains "NodePort" .Values.service.type -}}
export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{ else if contains "LoadBalancer" .Values.service.type -}}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
http://$SERVICE_IP:{{ .Values.service.port -}}
{{ else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000
{{- end }}
{{- end }}
3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
{{- if not .Values.persistence.enabled }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
{{- end }}
View File
@ -0,0 +1,145 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account
*/}}
{{- define "grafana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "grafana.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "grafana.serviceAccountNameTest" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }}
{{- else -}}
{{ default "default" .Values.serviceAccount.nameTest }}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "grafana.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "grafana.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "grafana.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "grafana.imageRenderer.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.imageRenderer.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels ImageRenderer
*/}}
{{- define "grafana.imageRenderer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}
View File
@ -0,0 +1,496 @@
{{- define "grafana.pod" -}}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "grafana.serviceAccountName" . }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | indent 2 }}
{{- end }}
{{- if .Values.hostAliases }}
hostAliases:
{{ toYaml .Values.hostAliases | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }}
initContainers:
{{- end }}
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
- name: init-chown-data
{{- if .Values.initChownData.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }}
securityContext:
runAsNonRoot: false
runAsUser: 0
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"]
resources:
{{ toYaml .Values.initChownData.resources | indent 6 }}
volumeMounts:
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- end }}
{{- if .Values.dashboards }}
- name: download-dashboards
{{- if .Values.downloadDashboardsImage.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
command: ["/bin/sh"]
args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ]
resources:
{{ toYaml .Values.downloadDashboards.resources | indent 6 }}
env:
{{- range $key, $value := .Values.downloadDashboards.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.downloadDashboards.envFromSecret }}
envFrom:
- secretRef:
name: {{ tpl .Values.downloadDashboards.envFromSecret . }}
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/grafana/download_dashboards.sh"
subPath: download_dashboards.sh
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
{{- if .Values.sidecar.datasources.envFromSecret }}
envFrom:
- secretRef:
name: {{ tpl .Values.sidecar.datasources.envFromSecret . }}
{{- end }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"
{{- if .Values.sidecar.datasources.labelValue }}
- name: LABEL_VALUE
value: {{ quote .Values.sidecar.datasources.labelValue }}
{{- end }}
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.datasources.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.datasources.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: {{ template "grafana.name" . }}-sc-notifiers
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.notifiers.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/notifiers"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.notifiers.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- if .Values.extraInitContainers }}
{{ toYaml .Values.extraInitContainers | indent 2 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: {{ .Values.sidecar.dashboards.watchMethod }}
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
{{- if .Values.sidecar.dashboards.labelValue }}
- name: LABEL_VALUE
value: {{ quote .Values.sidecar.dashboards.labelValue }}
{{- end }}
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.dashboards.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.dashboards.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
{{- if .Values.sidecar.dashboards.folderAnnotation }}
- name: FOLDER_ANNOTATION
value: "{{ .Values.sidecar.dashboards.folderAnnotation }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{- end}}
- name: {{ .Chart.Name }}
{{- if .Values.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
command:
{{- range .Values.command }}
- {{ . }}
{{- end }}
{{- end}}
{{- if .Values.containerSecurityContext }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 6 }}
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
{{- if .Values.ldap.enabled }}
- name: ldap
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- end }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
- name: dashboards-{{ $provider }}
mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
subPath: "{{ $key }}.json"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.dashboardsConfigMaps }}
{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }}
- name: dashboards-{{ . }}
mountPath: "/var/lib/grafana/dashboards/{{ . }}"
{{- end }}
{{- end }}
{{- if .Values.datasources }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
subPath: datasources.yaml
{{- end }}
{{- if .Values.notifiers }}
- name: config
mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml"
subPath: notifiers.yaml
{{- end }}
{{- if .Values.dashboardProviders }}
- name: config
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
{{- end }}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{ if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
subPath: provider.yaml
{{- end}}
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
subPath: {{ .subPath | default "" }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
containerPort: {{ .Values.service.targetPort }}
protocol: TCP
- name: {{ .Values.podPortName }}
containerPort: 3000
protocol: TCP
env:
{{- if not .Values.env.GF_SECURITY_ADMIN_USER }}
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }}
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if .Values.plugins }}
- name: GF_INSTALL_PLUGINS
valueFrom:
configMapKeyRef:
name: {{ template "grafana.fullname" . }}
key: plugins
{{- end }}
{{- if .Values.smtp.existingSecret }}
- name: GF_SMTP_USER
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.userKey | default "user" }}
- name: GF_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.passwordKey | default "password" }}
{{- end }}
{{ if .Values.imageRenderer.enabled }}
- name: GF_RENDERING_SERVER_URL
value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render
- name: GF_RENDERING_CALLBACK_URL
value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }}
{{ end }}
{{- range $key, $value := .Values.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{ toYaml $value | indent 10 }}
{{- end }}
{{- range $key, $value := .Values.env }}
- name: "{{ tpl $key $ }}"
value: "{{ tpl (print $value) $ }}"
{{- end }}
{{- if .Values.envFromSecret }}
envFrom:
- secretRef:
name: {{ tpl .Values.envFromSecret . }}
{{- end }}
{{- if .Values.envRenderSecret }}
envFrom:
- secretRef:
name: {{ template "grafana.fullname" . }}-env
{{- end }}
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 6 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 6 }}
resources:
{{ toYaml .Values.resources | indent 6 }}
{{- with .Values.extraContainers }}
{{ tpl . $ | indent 2 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 2 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 2 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 2 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 2 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- if .Values.dashboards }}
{{- range (keys .Values.dashboards | sortAlpha) }}
- name: dashboards-{{ . }}
configMap:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
{{- end }}
{{- end }}
{{- if .Values.dashboardsConfigMaps }}
{{ $root := . }}
{{- range $provider, $name := .Values.dashboardsConfigMaps }}
- name: dashboards-{{ $provider }}
configMap:
name: {{ tpl $name $root }}
{{- end }}
{{- end }}
{{- if .Values.ldap.enabled }}
- name: ldap
secret:
{{- if .Values.ldap.existingSecret }}
secretName: {{ .Values.ldap.existingSecret }}
{{- else }}
secretName: {{ template "grafana.fullname" . }}
{{- end }}
items:
- key: ldap-toml
path: ldap.toml
{{- end }}
{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }}
# nothing
{{- else }}
- name: storage
{{- if .Values.persistence.inMemory.enabled }}
emptyDir:
medium: Memory
{{- if .Values.persistence.inMemory.sizeLimit }}
sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }}
{{- end -}}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
emptyDir: {}
{{- if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
configMap:
name: {{ template "grafana.fullname" . }}-config-dashboards
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
emptyDir: {}
{{- end -}}
{{- range .Values.extraSecretMounts }}
{{- if .secretName }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- else if .projected }}
- name: {{ .name }}
projected: {{- toYaml .projected | nindent 6 }}
{{- else if .csi }}
- name: {{ .name }}
csi: {{- toYaml .csi | nindent 6 }}
{{- end }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
emptyDir: {}
{{- end -}}
{{- if .Values.extraContainerVolumes }}
{{ toYaml .Values.extraContainerVolumes | indent 2 }}
{{- end }}
{{- end }}
View File
@ -0,0 +1,25 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }}
rules:
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end}}
{{- with .Values.rbac.extraClusterRoleRules }}
{{ toYaml . | indent 0 }}
{{- end}}
{{- else }}
rules: []
{{- end}}
{{- end}}
View File
@ -0,0 +1,24 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "grafana.fullname" . }}-clusterrolebinding
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
roleRef:
kind: ClusterRole
{{- if (not .Values.rbac.useExistingRole) }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- else }}
name: {{ .Values.rbac.useExistingRole }}
{{- end }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}
View File
@ -0,0 +1,29 @@
{{- if .Values.sidecar.dashboards.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-config-dashboards
namespace: {{ template "grafana.namespace" . }}
data:
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
{{- end}}
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
options:
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- end}}
View File
@ -0,0 +1,80 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
data:
{{- if .Values.plugins }}
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $key, $value := index .Values "grafana.ini" }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else }}
{{ $elem }} = {{ tpl (toYaml $elemVal) $ }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.datasources }}
{{ $root := . }}
{{- range $key, $value := .Values.datasources }}
{{ $key }}: |
{{ tpl (toYaml $value | indent 4) $root }}
{{- end -}}
{{- end -}}
{{- if .Values.notifiers }}
{{- range $key, $value := .Values.notifiers }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
-H "Accept: application/json" \
{{- if $value.token }}
-H "Authorization: token {{ $value.token }}" \
{{- end }}
-H "Content-Type: application/json;charset=UTF-8" \
{{ end }}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
> "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
View File
@ -0,0 +1,35 @@
{{- if .Values.dashboards }}
{{ $files := .Files }}
{{- range $provider, $dashboards := .Values.dashboards }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
namespace: {{ template "grafana.namespace" $ }}
labels:
{{- include "grafana.labels" $ | nindent 4 }}
dashboard-provider: {{ $provider }}
{{- if $dashboards }}
data:
{{- $dashboardFound := false }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
{{- $dashboardFound = true }}
{{ print $key | indent 2 }}.json:
{{- if hasKey $value "json" }}
|-
{{ $value.json | indent 6 }}
{{- end }}
{{- if hasKey $value "file" }}
{{ toYaml ( $files.Get $value.file ) | indent 4}}
{{- end }}
{{- end }}
{{- end }}
{{- if not $dashboardFound }}
{}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}
View File
@ -0,0 +1,48 @@
{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- with .Values.deploymentStrategy }}
strategy:
{{ toYaml . | trim | indent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- end }}
View File
@ -0,0 +1,18 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}-headless
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
clusterIP: None
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
View File
@ -0,0 +1,117 @@
{{ if .Values.imageRenderer.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}-image-renderer
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- if .Values.imageRenderer.labels }}
{{ toYaml .Values.imageRenderer.labels | indent 4 }}
{{- end }}
{{- with .Values.imageRenderer.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.imageRenderer.replicas }}
revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- with .Values.imageRenderer.deploymentStrategy }}
strategy:
{{ toYaml . | trim | indent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }}
{{- with .Values.imageRenderer.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.imageRenderer.podAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- if .Values.imageRenderer.schedulerName }}
schedulerName: "{{ .Values.imageRenderer.schedulerName }}"
{{- end }}
{{- if .Values.imageRenderer.serviceAccountName }}
serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}"
{{- else }}
serviceAccountName: {{ template "grafana.serviceAccountName" . }}
{{- end }}
{{- if .Values.imageRenderer.securityContext }}
securityContext:
{{ toYaml .Values.imageRenderer.securityContext | indent 2 }}
{{- end }}
{{- if .Values.imageRenderer.hostAliases }}
hostAliases:
{{ toYaml .Values.imageRenderer.hostAliases | indent 2 }}
{{- end }}
{{- if .Values.imageRenderer.priorityClassName }}
priorityClassName: {{ .Values.imageRenderer.priorityClassName }}
{{- end }}
{{- if .Values.imageRenderer.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.imageRenderer.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
- name: {{ .Chart.Name }}-image-renderer
{{- if .Values.imageRenderer.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }}
{{- if .Values.imageRenderer.command }}
command:
{{- range .Values.imageRenderer.command }}
- {{ . }}
{{- end }}
{{- end}}
ports:
- name: {{ .Values.imageRenderer.service.portName }}
containerPort: {{ .Values.imageRenderer.service.port }}
protocol: TCP
env:
- name: HTTP_PORT
value: {{ .Values.imageRenderer.service.port | quote }}
{{- range $key, $value := .Values.imageRenderer.env }}
- name: {{ $key | quote }}
value: {{ $value | quote }}
{{- end }}
securityContext:
capabilities:
drop: ['all']
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /tmp
name: image-renderer-tmpfs
{{- with .Values.imageRenderer.resources }}
resources:
{{ toYaml . | indent 12 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.imageRenderer.nodeSelector }}
{{ toYaml .Values.imageRenderer.nodeSelector | indent 8 }}
{{- end }}
{{- with .Values.imageRenderer.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.imageRenderer.tolerations }}
{{ toYaml .Values.imageRenderer.tolerations | indent 8 }}
{{- end }}
volumes:
- name: image-renderer-tmpfs
emptyDir: {}
{{- end }}
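
For orientation, a minimal values.yaml fragment that would exercise the image-renderer deployment above; a sketch only. The key paths mirror the .Values.imageRenderer references read by the template, but the repository, tag, port numbers, and replica count are illustrative, not chart defaults:

imageRenderer:
  enabled: true
  replicas: 1
  revisionHistoryLimit: 10
  image:
    repository: grafana/grafana-image-renderer  # illustrative
    tag: "2.0.1"                                # illustrative
    pullPolicy: IfNotPresent
  service:
    portName: http
    port: 8081
    targetPort: 8081
  env:
    ENABLE_METRICS: "true"   # each entry is copied into the container env by the range loop

Note that HTTP_PORT is always injected from .Values.imageRenderer.service.port, so the renderer listens on the same port the service targets.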

View File

@ -0,0 +1,76 @@
{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "grafana.fullname" . }}-image-renderer-ingress
namespace: {{ template "grafana.namespace" . }}
annotations:
comment: Limit image-renderer ingress traffic from grafana
spec:
podSelector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- if .Values.imageRenderer.podLabels }}
{{ toYaml .Values.imageRenderer.podLabels | nindent 6 }}
{{- end }}
policyTypes:
- Ingress
ingress:
- ports:
- port: {{ .Values.imageRenderer.service.port }}
protocol: TCP
from:
- namespaceSelector:
matchLabels:
name: {{ template "grafana.namespace" . }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 14 }}
{{- end }}
{{ end }}
{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "grafana.fullname" . }}-image-renderer-egress
namespace: {{ template "grafana.namespace" . }}
annotations:
comment: Limit image-renderer egress traffic to grafana
spec:
podSelector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- if .Values.imageRenderer.podLabels }}
{{ toYaml .Values.imageRenderer.podLabels | nindent 6 }}
{{- end }}
policyTypes:
- Egress
egress:
# allow dns resolution
- ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
# talk only to grafana
- ports:
- port: {{ .Values.service.port }}
protocol: TCP
to:
- namespaceSelector:
matchLabels:
name: {{ template "grafana.namespace" . }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 14 }}
{{- end }}
{{ end }}
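
The two policies above are toggled independently: limitIngress admits traffic to the renderer only from Grafana pods in the release namespace, while limitEgress lets the renderer reach only DNS (port 53, UDP and TCP) and the Grafana service port. A values sketch enabling both (paths match the .Values references above):

imageRenderer:
  enabled: true
  networkPolicy:
    limitIngress: true
    limitEgress: true

One caveat: the namespaceSelector matches on a name label, so this assumes the release namespace actually carries a name: <namespace> label.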

View File

@ -0,0 +1,28 @@
{{ if .Values.imageRenderer.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}-image-renderer
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- if .Values.imageRenderer.service.labels }}
{{ toYaml .Values.imageRenderer.service.labels | indent 4 }}
{{- end }}
{{- with .Values.imageRenderer.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
type: ClusterIP
{{- if .Values.imageRenderer.service.clusterIP }}
clusterIP: {{ .Values.imageRenderer.service.clusterIP }}
{{- end }}
ports:
- name: {{ .Values.imageRenderer.service.portName }}
port: {{ .Values.imageRenderer.service.port }}
protocol: TCP
targetPort: {{ .Values.imageRenderer.service.targetPort }}
selector:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }}
{{ end }}
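
Rendered with the illustrative imageRenderer values sketched earlier and a hypothetical release name, this comes out roughly as below; the selector labels are produced by the grafana.imageRenderer.selectorLabels helper, which lives in _helpers.tpl and is not shown in this diff:

apiVersion: v1
kind: Service
metadata:
  name: my-release-grafana-image-renderer   # hypothetical fullname
spec:
  type: ClusterIP
  ports:
    - name: http          # .Values.imageRenderer.service.portName
      port: 8081          # .Values.imageRenderer.service.port
      protocol: TCP
      targetPort: 8081    # .Values.imageRenderer.service.targetPort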

View File

@ -0,0 +1,80 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "grafana.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $ingressPathType := .Values.ingress.pathType -}}
{{- $extraPaths := .Values.ingress.extraPaths -}}
{{- $newAPI := .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}}
{{- if $newAPI -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
{{- if .Values.ingress.annotations }}
annotations:
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ tpl $value $ | quote }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }}
{{- end }}
rules:
{{- if .Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ tpl . $ }}
http:
paths:
{{- if $extraPaths }}
{{ toYaml $extraPaths | indent 10 }}
{{- end }}
- path: {{ $ingressPath }}
{{- if $newAPI }}
pathType: {{ $ingressPathType }}
{{- end }}
backend:
{{- if $newAPI }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- else }}
- http:
paths:
- backend:
{{- if $newAPI }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
pathType: {{ $ingressPathType }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- if $ingressPath }}
path: {{ $ingressPath }}
{{- end }}
{{- end -}}
{{- end }}
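
The capability probe at the top is what keeps this template portable: $newAPI selects the networking.k8s.io/v1 backend shape (service.name plus port.number, with a pathType), and older clusters fall back to the serviceName/servicePort form. On a v1-capable cluster, with a hypothetical host and release name, a rules entry renders roughly as:

rules:
  - host: grafana.example.com        # illustrative
    http:
      paths:
        - path: /
          pathType: Prefix           # from .Values.ingress.pathType
          backend:
            service:
              name: my-release-grafana   # illustrative fullname
              port:
                number: 80               # .Values.service.port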

View File

@ -0,0 +1,75 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-nginx-proxy-config
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
nginx.conf: |-
worker_processes auto;
error_log /dev/stdout warn;
pid /var/cache/nginx/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)';
proxy_connect_timeout 10;
proxy_read_timeout 180;
proxy_send_timeout 5;
proxy_buffering off;
proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g;
server {
listen 8080;
access_log off;
gzip on;
gzip_min_length 1k;
gzip_comp_level 2;
gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png;
gzip_vary on;
gzip_disable "MSIE [1-6]\.";
proxy_set_header Host $host;
location /api/dashboards {
proxy_pass http://localhost:3000;
}
location /api/search {
proxy_pass http://localhost:3000;
sub_filter_types application/json;
sub_filter_once off;
sub_filter '"url":"/d' '"url":"d';
}
location / {
proxy_cache my_zone;
proxy_cache_valid 200 302 1d;
proxy_cache_valid 301 30d;
proxy_cache_valid any 5m;
proxy_cache_bypass $http_cache_control;
add_header X-Proxy-Cache $upstream_cache_status;
add_header Cache-Control "public";
proxy_pass http://localhost:3000/;
sub_filter_types text/html;
sub_filter_once off;
sub_filter '"appSubUrl":""' '"appSubUrl":"."';
sub_filter '"url":"/' '"url":"./';
sub_filter ':"/avatar/' ':"avatar/';
if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
expires 90d;
}
}
}
}
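
This ConfigMap is not referenced by any other template in this diff; one plausible way to wire it in is the chart's extraConfigmapMounts value, mounting nginx.conf into an nginx sidecar or custom image (a sketch, assuming a stock nginx file layout):

extraConfigmapMounts:
  - name: grafana-nginx-proxy
    mountPath: /etc/nginx/nginx.conf
    subPath: nginx.conf
    configMap: grafana-nginx-proxy-config
    readOnly: true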

View File

@ -0,0 +1,22 @@
{{- if .Values.podDisruptionBudget }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- end }}
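
A sketch of the corresponding values; note the API server rejects a PodDisruptionBudget that sets both fields, so supply exactly one even though the template would happily render both:

podDisruptionBudget:
  minAvailable: 1
  # maxUnavailable: 1   # mutually exclusive with minAvailable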

View File

@ -0,0 +1,49 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.rbac.pspAnnotations }}
annotations: {{ toYaml .Values.rbac.pspAnnotations | nindent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, without DAC_OVERRIDE or CHOWN
- FOWNER
- FSETID
- KILL
- SETGID
- SETUID
- SETPCAP
- NET_BIND_SERVICE
- NET_RAW
- SYS_CHROOT
- MKNOD
- AUDIT_WRITE
- SETFCAP
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'csi'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
{{- end }}
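
Enabling the policy, optionally with the pre-GA seccomp annotation keys that PSPs of this era commonly carried (the annotation values shown are illustrative):

rbac:
  pspEnabled: true
  pspAnnotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: "docker/default,runtime/default"
    seccomp.security.alpha.kubernetes.io/defaultProfileName: "runtime/default"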

View File

@ -0,0 +1,33 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.persistence.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.persistence.finalizers }}
finalizers:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClassName }}
storageClassName: {{ .Values.persistence.storageClassName }}
{{- end -}}
{{- with .Values.persistence.selectorLabels }}
selector:
matchLabels:
{{ toYaml . | indent 6 }}
{{- end }}
{{- end -}}
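
The claim is only emitted when persistence is enabled, no existing claim is named, and the type is pvc. A values sketch (size and storage class are illustrative):

persistence:
  enabled: true
  type: pvc
  accessModes:
    - ReadWriteOnce
  size: 10Gi                   # illustrative
  storageClassName: standard   # illustrative
  finalizers:
    - kubernetes.io/pvc-protection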

View File

@ -0,0 +1,32 @@
{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}}
apiVersion: {{ template "rbac.apiVersion" . }}
kind: Role
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules)) }}
rules:
{{- if .Values.rbac.pspEnabled }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "grafana.fullname" . }}]
{{- end }}
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end }}
{{- with .Values.rbac.extraRoleRules }}
{{ toYaml . | indent 0 }}
{{- end}}
{{- else }}
rules: []
{{- end }}
{{- end }}
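
extraRoleRules entries are injected verbatim (toYaml at indent 0), so each entry must be a complete PolicyRule. A sketch granting read access to pods (the rule itself is illustrative):

rbac:
  create: true
  namespaced: true
  extraRoleRules:
    - apiGroups: [""]
      resources: ["pods"]
      verbs: ["get", "list", "watch"]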

View File

@ -0,0 +1,25 @@
{{- if .Values.rbac.create -}}
apiVersion: {{ template "rbac.apiVersion" . }}
kind: RoleBinding
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
{{- if (not .Values.rbac.useExistingRole) }}
name: {{ template "grafana.fullname" . }}
{{- else }}
name: {{ .Values.rbac.useExistingRole }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end -}}
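
To bind the service account to a role managed outside the chart instead of the generated one (the role name here is illustrative):

rbac:
  create: true
  useExistingRole: my-preexisting-role   # also suppresses rendering of the chart's own Role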

View File

@ -0,0 +1,14 @@
{{- if .Values.envRenderSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "grafana.fullname" . }}-env
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $key, $val := .Values.envRenderSecret }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end -}}
{{- end }}
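
Each envRenderSecret entry becomes a base64-encoded key under data; for example (variable name and value are illustrative):

envRenderSecret:
  MY_SECRET_ENV: "s3cr3t"   # rendered as MY_SECRET_ENV: czNjcjN0 under data: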

View File

@ -0,0 +1,22 @@
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
type: Opaque
data:
{{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ randAlphaNum 40 | b64enc | quote }}
{{- end }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }}
{{- end }}
{{- end }}
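
Two behaviors worth noting: if adminPassword is unset, every install generates a fresh random 40-character password via randAlphaNum, and the ldap-toml key is written whenever no existing LDAP secret is named, even with LDAP disabled (it simply encodes an empty config). A values sketch pinning the password instead (value illustrative):

adminUser: admin
adminPassword: "change-me"   # illustrative; omit to get a random 40-char password per install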

View File

@ -0,0 +1,50 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
protocol: TCP
targetPort: {{ .Values.service.targetPort }}
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraExposePorts }}
{{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }}
{{- end }}
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,40 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "grafana.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- end }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: {{ .Values.serviceMonitor.interval }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
honorLabels: true
port: {{ .Values.service.portName }}
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}
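
The ServiceMonitor scrapes the main service's named port. A sketch of values that would enable it; the namespace and labels are illustrative and must line up with your Prometheus operator's selectors:

serviceMonitor:
  enabled: true
  namespace: monitoring        # illustrative
  interval: 30s
  scrapeTimeout: 10s
  path: /metrics
  scheme: http
  labels:
    release: prometheus        # illustrative; must match the operator's serviceMonitorSelector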

Some files were not shown because too many files have changed in this diff.