rancher-charts/packages/rancher-monitoring/rancher-monitoring.patch

2621 lines
123 KiB
Diff

diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/Chart.yaml packages/rancher-monitoring/charts/Chart.yaml
--- packages/rancher-monitoring/charts-original/Chart.yaml
+++ packages/rancher-monitoring/charts/Chart.yaml
@@ -5,31 +5,34 @@
- name: Upstream Project
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/namespace: cattle-monitoring-system
+ catalog.cattle.io/release-name: rancher-monitoring
+ catalog.cattle.io/ui-component: monitoring
+ catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1
apiVersion: v1
appVersion: 0.38.1
-description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
- and Prometheus rules combined with documentation and scripts to provide easy to
- operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
- Operator.
+description: Collects several related Helm charts, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
home: https://github.com/prometheus-operator/kube-prometheus
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
-- operator
-- prometheus
-- kube-prometheus
+ - operator
+ - prometheus
+ - kube-prometheus
+ - monitoring
maintainers:
-- name: vsliouniaev
-- name: bismarck
-- email: gianrubio@gmail.com
- name: gianrubio
-- email: github.gkarthiks@gmail.com
- name: gkarthiks
-- email: scott@r6by.com
- name: scottrigby
-- email: miroslav.hadzhiev@gmail.com
- name: Xtigyro
-name: kube-prometheus-stack
+ - name: vsliouniaev
+ - name: bismarck
+ - email: gianrubio@gmail.com
+ name: gianrubio
+ - email: github.gkarthiks@gmail.com
+ name: gkarthiks
+ - email: scott@r6by.com
+ name: scottrigby
+ - email: miroslav.hadzhiev@gmail.com
+ name: Xtigyro
+name: rancher-monitoring
sources:
-- https://github.com/prometheus-community/helm-charts
-- https://github.com/prometheus-operator/kube-prometheus
+ - https://github.com/prometheus-community/helm-charts
+ - https://github.com/prometheus-operator/kube-prometheus
version: 9.4.2
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/README.md packages/rancher-monitoring/charts/README.md
--- packages/rancher-monitoring/charts-original/README.md
+++ packages/rancher-monitoring/charts/README.md
@@ -127,7 +127,43 @@
helm show values prometheus-community/kube-prometheus-stack
```
-You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+
+### Rancher Monitoring Configuration
+
+The following table shows values exposed by Rancher Monitoring's additions to the chart:
+
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `nameOverride` | Provide a name that should be used instead of the chart name when naming all resources deployed by this chart | `"rancher-monitoring"` |
+| `namespaceOverride` | Override the deployment namespace | `"cattle-monitoring-system"` |
+| `global.rbac.userRoles.create` | Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets | `true` |
+| `global.rbac.userRoles.aggregateToDefaultRoles` | Aggregate default user ClusterRoles into default k8s ClusterRoles | `true` |
+| `prometheus-adapter.enabled` | Whether to install [prometheus-adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) within the cluster | `true` |
+| `prometheus-adapter.prometheus.url` | A URL pointing to the Prometheus deployment within your cluster. The default value is set based on the assumption that you plan to deploy the default Prometheus instance from this chart where `.Values.namespaceOverride=cattle-monitoring-system` and `.Values.nameOverride=rancher-monitoring` | `http://rancher-monitoring-prometheus.cattle-monitoring-system.svc` |
+| `prometheus-adapter.prometheus.port` | The port on the Prometheus deployment that Prometheus Adapter can make requests to | `9090` |
+| `prometheus.prometheusSpec.ignoreNamespaceSelectors` | Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs. If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into | `false` |
+| `alertmanager.secret.cleanupOnUninstall` | Whether or not to trigger a job that deletes the alertmanager config secret on a `helm uninstall`. By default, this is disabled to prevent the loss of alerting configuration on an uninstall. | `false` |
+| `alertmanager.secret.image.pullPolicy` | Image pull policy for job(s) related to alertmanager config secret's lifecycle | `IfNotPresent` |
+| `alertmanager.secret.image.repository` | Repository to use for job(s) related to alertmanager config secret's lifecycle | `rancher/rancher-agent` |
+| `alertmanager.secret.image.tag` | Tag to use for job(s) related to alertmanager config secret's lifecycle | `v2.4.8` |
+
+The following values are enabled for different distributions via [rancher-pushprox](https://github.com/rancher/dev-charts/tree/master/packages/rancher-pushprox). See the rancher-pushprox `README.md` for more information on all values that can be configured for the PushProxy chart.
+
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` |
+| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` |
+| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` |
+| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` |
+| `k3sControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in k3s clusters | `false` |
+| `k3sScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in k3s clusters | `false` |
+| `k3sProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in k3s clusters | `false` |
+| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` |
+| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` |
+| `kubeAdmProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in kubeAdm clusters | `false` |
+| `kubeAdmEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in kubeAdm clusters | `false` |
+
### Multiple releases
@@ -221,7 +257,7 @@
#### CoreOS CRDs
-The CRDs are provisioned using crd-install hooks, rather than relying on a separate chart installation. If you already have these CRDs provisioned and don't want to remove them, you can disable the CRD creation by these hooks by setting `prometheusOperator.createCustomResource` to `false` (not required if using Helm v3).
+The CRDs are provisioned using a separate chart installation within the Helm chart `rancher-monitoring-crd` that is packaged alongside this chart.
#### Kubelet Service
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/grafana/templates/_pod.tpl packages/rancher-monitoring/charts/charts/grafana/templates/_pod.tpl
--- packages/rancher-monitoring/charts-original/charts/grafana/templates/_pod.tpl
+++ packages/rancher-monitoring/charts/charts/grafana/templates/_pod.tpl
@@ -1,4 +1,3 @@
-
{{- define "grafana.pod" -}}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
@@ -21,9 +20,9 @@
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
- name: init-chown-data
{{- if .Values.initChownData.image.sha }}
- image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}"
{{- else }}
- image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }}
securityContext:
@@ -41,9 +40,9 @@
{{- if .Values.dashboards }}
- name: download-dashboards
{{- if .Values.downloadDashboardsImage.sha }}
- image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}"
{{- else }}
- image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
command: ["/bin/sh"]
@@ -73,9 +72,9 @@
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
{{- if .Values.sidecar.image.sha }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
@@ -108,9 +107,9 @@
{{- if .Values.sidecar.notifiers.enabled }}
- name: {{ template "grafana.name" . }}-sc-notifiers
{{- if .Values.sidecar.image.sha }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
@@ -153,9 +152,9 @@
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
{{- if .Values.sidecar.image.sha }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
- image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
@@ -187,9 +186,9 @@
{{- end}}
- name: {{ .Chart.Name }}
{{- if .Values.image.sha }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
{{- else }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/grafana/values.yaml packages/rancher-monitoring/charts/charts/grafana/values.yaml
--- packages/rancher-monitoring/charts-original/charts/grafana/values.yaml
+++ packages/rancher-monitoring/charts/charts/grafana/values.yaml
@@ -49,7 +49,7 @@
# schedulerName: "default-scheduler"
image:
- repository: grafana/grafana
+ repository: rancher/grafana-grafana
tag: 7.1.5
sha: ""
pullPolicy: IfNotPresent
@@ -63,7 +63,7 @@
testFramework:
enabled: true
- image: "bats/bats"
+ image: "rancher/bats-bats"
tag: "v1.1.0"
imagePullPolicy: IfNotPresent
securityContext: {}
@@ -91,7 +91,7 @@
# priorityClassName:
downloadDashboardsImage:
- repository: curlimages/curl
+ repository: rancher/curlimages-curl
tag: 7.70.0
sha: ""
pullPolicy: IfNotPresent
@@ -244,7 +244,7 @@
## initChownData container image
##
image:
- repository: busybox
+ repository: rancher/library-busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
@@ -486,7 +486,7 @@
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image:
- repository: kiwigrid/k8s-sidecar
+ repository: rancher/kiwigrid-k8s-sidecar
tag: 0.1.151
sha: ""
imagePullPolicy: IfNotPresent
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/kube-state-metrics/templates/deployment.yaml packages/rancher-monitoring/charts/charts/kube-state-metrics/templates/deployment.yaml
--- packages/rancher-monitoring/charts-original/charts/kube-state-metrics/templates/deployment.yaml
+++ packages/rancher-monitoring/charts/charts/kube-state-metrics/templates/deployment.yaml
@@ -154,7 +154,7 @@
- --pod-namespace=$(POD_NAMESPACE)
{{ end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 8080
livenessProbe:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/kube-state-metrics/values.yaml packages/rancher-monitoring/charts/charts/kube-state-metrics/values.yaml
--- packages/rancher-monitoring/charts-original/charts/kube-state-metrics/values.yaml
+++ packages/rancher-monitoring/charts/charts/kube-state-metrics/values.yaml
@@ -1,7 +1,7 @@
# Default values for kube-state-metrics.
prometheusScrape: true
image:
- repository: quay.io/coreos/kube-state-metrics
+ repository: rancher/coreos-kube-state-metrics
tag: v1.9.7
pullPolicy: IfNotPresent
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-adapter/README.md packages/rancher-monitoring/charts/charts/prometheus-adapter/README.md
--- packages/rancher-monitoring/charts-original/charts/prometheus-adapter/README.md
+++ packages/rancher-monitoring/charts/charts/prometheus-adapter/README.md
@@ -111,7 +111,7 @@
| Parameter | Description | Default |
| ------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------|
| `affinity` | Node affinity | `{}` |
-| `image.repository` | Image repository | `directxman12/k8s-prometheus-adapter-amd64` |
+| `image.repository` | Image repository | `rancher/directxman12-k8s-prometheus-adapter-amd64` |
| `image.tag` | Image tag | `v0.6.0` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `{}` |
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-adapter/templates/custom-metrics-apiserver-deployment.yaml packages/rancher-monitoring/charts/charts/prometheus-adapter/templates/custom-metrics-apiserver-deployment.yaml
--- packages/rancher-monitoring/charts-original/charts/prometheus-adapter/templates/custom-metrics-apiserver-deployment.yaml
+++ packages/rancher-monitoring/charts/charts/prometheus-adapter/templates/custom-metrics-apiserver-deployment.yaml
@@ -36,7 +36,7 @@
{{- end }}
containers:
- name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /adapter
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-adapter/values.yaml packages/rancher-monitoring/charts/charts/prometheus-adapter/values.yaml
--- packages/rancher-monitoring/charts-original/charts/prometheus-adapter/values.yaml
+++ packages/rancher-monitoring/charts/charts/prometheus-adapter/values.yaml
@@ -2,7 +2,7 @@
affinity: {}
image:
- repository: directxman12/k8s-prometheus-adapter-amd64
+ repository: rancher/directxman12-k8s-prometheus-adapter-amd64
tag: v0.6.0
pullPolicy: IfNotPresent
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/OWNERS packages/rancher-monitoring/charts/charts/prometheus-node-exporter/OWNERS
--- packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/OWNERS
+++ packages/rancher-monitoring/charts/charts/prometheus-node-exporter/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- gianrubio
+- vsliouniaev
+reviewers:
+- gianrubio
+- vsliouniaev
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/templates/daemonset.yaml packages/rancher-monitoring/charts/charts/prometheus-node-exporter/templates/daemonset.yaml
--- packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/templates/daemonset.yaml
+++ packages/rancher-monitoring/charts/charts/prometheus-node-exporter/templates/daemonset.yaml
@@ -33,7 +33,7 @@
{{- end }}
containers:
- name: node-exporter
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --path.procfs=/host/proc
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/values.yaml packages/rancher-monitoring/charts/charts/prometheus-node-exporter/values.yaml
--- packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/values.yaml
+++ packages/rancher-monitoring/charts/charts/prometheus-node-exporter/values.yaml
@@ -2,7 +2,7 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
- repository: quay.io/prometheus/node-exporter
+ repository: rancher/prom-node-exporter
tag: v1.0.1
pullPolicy: IfNotPresent
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-alertmanager.yaml packages/rancher-monitoring/charts/crds/crd-alertmanager.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-alertmanager.yaml
+++ packages/rancher-monitoring/charts/crds/crd-alertmanager.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: alertmanagers.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-podmonitor.yaml packages/rancher-monitoring/charts/crds/crd-podmonitor.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-podmonitor.yaml
+++ packages/rancher-monitoring/charts/crds/crd-podmonitor.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: podmonitors.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-prometheus.yaml packages/rancher-monitoring/charts/crds/crd-prometheus.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-prometheus.yaml
+++ packages/rancher-monitoring/charts/crds/crd-prometheus.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: prometheuses.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-prometheusrules.yaml packages/rancher-monitoring/charts/crds/crd-prometheusrules.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-prometheusrules.yaml
+++ packages/rancher-monitoring/charts/crds/crd-prometheusrules.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: prometheusrules.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-servicemonitor.yaml packages/rancher-monitoring/charts/crds/crd-servicemonitor.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-servicemonitor.yaml
+++ packages/rancher-monitoring/charts/crds/crd-servicemonitor.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: servicemonitors.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/crds/crd-thanosrulers.yaml packages/rancher-monitoring/charts/crds/crd-thanosrulers.yaml
--- packages/rancher-monitoring/charts-original/crds/crd-thanosrulers.yaml
+++ packages/rancher-monitoring/charts/crds/crd-thanosrulers.yaml
@@ -4,7 +4,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
- helm.sh/hook: crd-install
creationTimestamp: null
name: thanosrulers.monitoring.coreos.com
spec:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/requirements.yaml packages/rancher-monitoring/charts/requirements.yaml
--- packages/rancher-monitoring/charts-original/requirements.yaml
+++ packages/rancher-monitoring/charts/requirements.yaml
@@ -1,16 +1,108 @@
dependencies:
-
- name: kube-state-metrics
- version: "2.8.*"
+ version: 2.8.14
repository: https://kubernetes-charts.storage.googleapis.com/
condition: kubeStateMetrics.enabled
-
- name: prometheus-node-exporter
- version: "1.11.*"
+ version: 1.11.2
repository: https://prometheus-community.github.io/helm-charts
condition: nodeExporter.enabled
-
- name: grafana
- version: "5.6.*"
+ version: 5.6.4
repository: https://grafana.github.io/helm-charts
condition: grafana.enabled
+
+ - name: prometheus-adapter
+ version: 2.4.0
+ repository: https://kubernetes-charts.storage.googleapis.com/
+ condition: prometheus-adapter.enabled
+
+ - name: rancher-pushprox
+ alias: rkeControllerManager
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rkeControllerManager.enabled
+
+ - name: rancher-pushprox
+ alias: rkeScheduler
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rkeScheduler.enabled
+
+ - name: rancher-pushprox
+ alias: rkeProxy
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rkeProxy.enabled
+
+ - name: rancher-pushprox
+ alias: rkeEtcd
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rkeEtcd.enabled
+
+ - name: rancher-pushprox
+ alias: k3sControllerManager
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: k3sControllerManager.enabled
+
+ - name: rancher-pushprox
+ alias: k3sScheduler
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: k3sScheduler.enabled
+
+ - name: rancher-pushprox
+ alias: k3sProxy
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: k3sProxy.enabled
+
+ - name: rancher-pushprox
+ alias: kubeAdmControllerManager
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: kubeAdmControllerManager.enabled
+
+ - name: rancher-pushprox
+ alias: kubeAdmScheduler
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: kubeAdmScheduler.enabled
+
+ - name: rancher-pushprox
+ alias: kubeAdmProxy
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: kubeAdmProxy.enabled
+
+ - name: rancher-pushprox
+ alias: kubeAdmEtcd
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: kubeAdmEtcd.enabled
+
+ - name: rancher-pushprox
+ alias: rke2ControllerManager
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rke2ControllerManager.enabled
+
+ - name: rancher-pushprox
+ alias: rke2Scheduler
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rke2Scheduler.enabled
+
+ - name: rancher-pushprox
+ alias: rke2Proxy
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rke2Proxy.enabled
+
+ - name: rancher-pushprox
+ alias: rke2Etcd
+ version: 0.1.0
+ repository: file://../../rancher-pushprox/charts
+ condition: rke2Etcd.enabled
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/_helpers.tpl packages/rancher-monitoring/charts/templates/_helpers.tpl
--- packages/rancher-monitoring/charts-original/templates/_helpers.tpl
+++ packages/rancher-monitoring/charts/templates/_helpers.tpl
@@ -1,3 +1,37 @@
+# Rancher
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- end -}}
+{{- end -}}
+
+# Special Exporters
+{{- define "exporter.kubeEtcd.enabled" -}}
+{{- if or .Values.kubeEtcd.enabled .Values.rkeEtcd.enabled .Values.kubeAdmEtcd.enabled .Values.rke2Etcd.enabled -}}
+"true"
+{{- end -}}
+{{- end }}
+
+{{- define "exporter.kubeControllerManager.enabled" -}}
+{{- if or .Values.kubeControllerManager.enabled .Values.rkeControllerManager.enabled .Values.k3sControllerManager.enabled .Values.kubeAdmControllerManager.enabled .Values.rke2ControllerManager.enabled -}}
+"true"
+{{- end -}}
+{{- end }}
+
+{{- define "exporter.kubeScheduler.enabled" -}}
+{{- if or .Values.kubeScheduler.enabled .Values.rkeScheduler.enabled .Values.k3sScheduler.enabled .Values.kubeAdmScheduler.enabled .Values.rke2Scheduler.enabled -}}
+"true"
+{{- end -}}
+{{- end }}
+
+{{- define "exporter.kubeProxy.enabled" -}}
+{{- if or .Values.kubeProxy.enabled .Values.rkeProxy.enabled .Values.k3sProxy.enabled .Values.kubeAdmProxy.enabled .Values.rke2Proxy.enabled -}}
+"true"
+{{- end -}}
+{{- end }}
+
+# Prometheus Operator
+
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}}
{{- define "kube-prometheus-stack.name" -}}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/alertmanager/alertmanager.yaml packages/rancher-monitoring/charts/templates/alertmanager/alertmanager.yaml
--- packages/rancher-monitoring/charts-original/templates/alertmanager/alertmanager.yaml
+++ packages/rancher-monitoring/charts/templates/alertmanager/alertmanager.yaml
@@ -9,7 +9,7 @@
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
{{- if .Values.alertmanager.alertmanagerSpec.image }}
- baseImage: {{ .Values.alertmanager.alertmanagerSpec.image.repository }}
+ baseImage: {{ template "system_default_registry" . }}{{ .Values.alertmanager.alertmanagerSpec.image.repository }}
version: {{ .Values.alertmanager.alertmanagerSpec.image.tag }}
{{- if .Values.alertmanager.alertmanagerSpec.image.sha }}
sha: {{ .Values.alertmanager.alertmanagerSpec.image.sha }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/alertmanager/cleanupSecret.yaml packages/rancher-monitoring/charts/templates/alertmanager/cleanupSecret.yaml
--- packages/rancher-monitoring/charts-original/templates/alertmanager/cleanupSecret.yaml
+++ packages/rancher-monitoring/charts/templates/alertmanager/cleanupSecret.yaml
@@ -0,0 +1,82 @@
+{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) (.Values.alertmanager.secret.cleanupOnUninstall) }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ labels:
+{{ include "kube-prometheus-stack.labels" . | indent 4 }}
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": post-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "5"
+spec:
+ template:
+ metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ labels: {{ include "kube-prometheus-stack.labels" . | nindent 8 }}
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ spec:
+ serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ containers:
+ - name: delete-secret
+ image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }}
+ imagePullPolicy: {{ .Values.alertmanager.secret.image.pullPolicy }}
+ command:
+ - /bin/sh
+ - -c
+ - >
+ if kubectl get secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager > /dev/null 2>&1; then
+ kubectl delete secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager
+ fi;
+ restartPolicy: OnFailure
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": post-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs: ['get', 'delete']
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": post-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+subjects:
+- kind: ServiceAccount
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": post-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
+{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/alertmanager/secret.yaml packages/rancher-monitoring/charts/templates/alertmanager/secret.yaml
--- packages/rancher-monitoring/charts-original/templates/alertmanager/secret.yaml
+++ packages/rancher-monitoring/charts/templates/alertmanager/secret.yaml
@@ -1,11 +1,16 @@
{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }}
+{{- if .Release.IsInstall }}
+{{- $secretName := (printf "alertmanager-%s-alertmanager" (include "kube-prometheus-stack.fullname" .)) }}
+{{- if (lookup "v1" "Secret" (include "kube-prometheus-stack.namespace" .) $secretName) }}
+{{- required (printf "Cannot overwrite existing secret %s in namespace %s." $secretName (include "kube-prometheus-stack.namespace" .)) "" }}
+{{- end }}{{- end }}
apiVersion: v1
kind: Secret
metadata:
- name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
namespace: {{ template "kube-prometheus-stack.namespace" . }}
-{{- if .Values.alertmanager.secret.annotations }}
annotations:
+{{- if .Values.alertmanager.secret.annotations }}
{{ toYaml .Values.alertmanager.secret.annotations | indent 4 }}
{{- end }}
labels:
@@ -20,4 +25,93 @@
{{- range $key, $val := .Values.alertmanager.templateFiles }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ labels:
+{{ include "kube-prometheus-stack.labels" . | indent 4 }}
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "5"
+spec:
+ template:
+ metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ labels: {{ include "kube-prometheus-stack.labels" . | nindent 8 }}
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ spec:
+ serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ containers:
+ - name: copy-pre-install-secret
+ image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }}
+ imagePullPolicy: {{ .Values.alertmanager.secret.image.pullPolicy }}
+ command:
+ - /bin/sh
+ - -c
+ - >
+ if kubectl get secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager > /dev/null 2>&1; then
+ echo "Secret already exists"
+ exit 1
+ fi;
+      kubectl patch secret -n {{ template "kube-prometheus-stack.namespace" . }} --dry-run=client -o yaml
+ alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ -p '{{ printf "{\"metadata\":{\"name\": \"alertmanager-%s-alertmanager\"}}" (include "kube-prometheus-stack.fullname" .) }}'
+ | kubectl apply -f -;
+ kubectl annotate secret -n {{ template "kube-prometheus-stack.namespace" . }}
+ alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager
+ helm.sh/hook- helm.sh/hook-delete-policy- helm.sh/hook-weight-;
+ restartPolicy: OnFailure
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs: ['create', 'get', 'patch']
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+subjects:
+- kind: ServiceAccount
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+ namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ labels:
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+ "helm.sh/hook-weight": "3"
{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/exporters/core-dns/servicemonitor.yaml packages/rancher-monitoring/charts/templates/exporters/core-dns/servicemonitor.yaml
--- packages/rancher-monitoring/charts-original/templates/exporters/core-dns/servicemonitor.yaml
+++ packages/rancher-monitoring/charts/templates/exporters/core-dns/servicemonitor.yaml
@@ -3,7 +3,7 @@
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-coredns
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: "kube-system"
labels:
app: {{ template "kube-prometheus-stack.name" . }}-coredns
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/exporters/kube-api-server/servicemonitor.yaml packages/rancher-monitoring/charts/templates/exporters/kube-api-server/servicemonitor.yaml
--- packages/rancher-monitoring/charts-original/templates/exporters/kube-api-server/servicemonitor.yaml
+++ packages/rancher-monitoring/charts/templates/exporters/kube-api-server/servicemonitor.yaml
@@ -3,7 +3,7 @@
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-apiserver
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: default
labels:
app: {{ template "kube-prometheus-stack.name" . }}-apiserver
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/exporters/kube-controller-manager/servicemonitor.yaml packages/rancher-monitoring/charts/templates/exporters/kube-controller-manager/servicemonitor.yaml
--- packages/rancher-monitoring/charts-original/templates/exporters/kube-controller-manager/servicemonitor.yaml
+++ packages/rancher-monitoring/charts/templates/exporters/kube-controller-manager/servicemonitor.yaml
@@ -3,7 +3,7 @@
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: "kube-system"
labels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/exporters/kubelet/servicemonitor.yaml packages/rancher-monitoring/charts/templates/exporters/kubelet/servicemonitor.yaml
--- packages/rancher-monitoring/charts-original/templates/exporters/kubelet/servicemonitor.yaml
+++ packages/rancher-monitoring/charts/templates/exporters/kubelet/servicemonitor.yaml
@@ -3,7 +3,7 @@
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-kubelet
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.kubelet.namespace }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-kubelet
{{- include "kube-prometheus-stack.labels" . | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/configmap-dashboards.yaml packages/rancher-monitoring/charts/templates/grafana/configmap-dashboards.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/configmap-dashboards.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/configmap-dashboards.yaml
@@ -10,7 +10,7 @@
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) $dashboardName | trunc 63 | trimSuffix "-" }}
- namespace: {{ template "kube-prometheus-stack.namespace" $ }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
labels:
{{- if $.Values.grafana.sidecar.dashboards.label }}
{{ $.Values.grafana.sidecar.dashboards.label }}: "1"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/configmaps-datasources.yaml packages/rancher-monitoring/charts/templates/grafana/configmaps-datasources.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/configmaps-datasources.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/configmaps-datasources.yaml
@@ -3,7 +3,7 @@
kind: ConfigMap
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-grafana-datasource
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.datasources.searchNamespace }}
{{- if .Values.grafana.sidecar.datasources.annotations }}
annotations:
{{ toYaml .Values.grafana.sidecar.datasources.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/etcd.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/etcd.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/etcd.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/etcd.yaml
@@ -4,11 +4,12 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeEtcd.enabled }}
+{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- if (include "exporter.kubeEtcd.enabled" .)}}
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
@@ -1113,4 +1114,5 @@
"uid": "c2f4e12cdf69feb95caa41a5a1b423d9",
"version": 215
}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-node-rsrc-use.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-node-rsrc-use.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-node-rsrc-use.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-node-rsrc-use.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-node-rsrc-use" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-cluster.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-cluster.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-cluster.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-cluster.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-namespace.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-namespace.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-namespace.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-namespace.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-pod.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-pod.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-pod.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-pod.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-workload.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-workload.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-workload.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-workload.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workload" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workloads-namespace" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/nodes.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/nodes.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/nodes.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/nodes.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/persistentvolumesusage.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/persistentvolumesusage.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/persistentvolumesusage.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/persistentvolumesusage.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/pods.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/pods.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/pods.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/pods.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "pods" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards/statefulset.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards/statefulset.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards/statefulset.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards/statefulset.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/apiserver.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/apiserver.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/apiserver.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/apiserver.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "apiserver" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/cluster-total.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/cluster-total.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/cluster-total.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/cluster-total.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "cluster-total" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/controller-manager.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/controller-manager.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/controller-manager.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/controller-manager.yaml
@@ -4,11 +4,12 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeControllerManager.enabled }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- if (include "exporter.kubeControllerManager.enabled" .)}}
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "controller-manager" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
@@ -1139,4 +1140,5 @@
"uid": "72e0e05bef5099e5f049b05fdc429ed4",
"version": 0
}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/etcd.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/etcd.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/etcd.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/etcd.yaml
@@ -4,11 +4,12 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeEtcd.enabled }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- if (include "exporter.kubeEtcd.enabled" .)}}
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
@@ -1113,4 +1114,5 @@
"uid": "c2f4e12cdf69feb95caa41a5a1b423d9",
"version": 215
}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-coredns.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-coredns.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-coredns.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-coredns.yaml
@@ -4,10 +4,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-coredns" | trunc 63 | trimSuffix "-" }}
- annotations:
-{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
labels:
{{- if $.Values.grafana.sidecar.dashboards.label }}
{{ $.Values.grafana.sidecar.dashboards.label }}: "1"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-node.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-node.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-node.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-node.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-node" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workload" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workloads-namespace" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/kubelet.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/kubelet.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/kubelet.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/kubelet.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "kubelet" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/namespace-by-pod.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/namespace-by-pod.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/namespace-by-pod.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/namespace-by-pod.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "namespace-by-pod" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/namespace-by-workload.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/namespace-by-workload.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/namespace-by-workload.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/namespace-by-workload.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "namespace-by-workload" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "node-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/node-rsrc-use.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/node-rsrc-use.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/node-rsrc-use.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/node-rsrc-use.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "node-rsrc-use" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/nodes.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/nodes.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/nodes.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/nodes.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/pod-total.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/pod-total.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/pod-total.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/pod-total.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "pod-total" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "prometheus-remote-write" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/prometheus.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/prometheus.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/prometheus.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/prometheus.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "prometheus" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/proxy.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/proxy.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/proxy.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/proxy.yaml
@@ -4,11 +4,12 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeProxy.enabled }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- if (include "exporter.kubeProxy.enabled" .)}}
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "proxy" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
@@ -1218,4 +1219,4 @@
"uid": "632e265de029684c40b21cb76bca4f94",
"version": 0
}
-{{- end }}
\ No newline at end of file
+{{- end }}{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/scheduler.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/scheduler.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/scheduler.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/scheduler.yaml
@@ -4,11 +4,12 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeScheduler.enabled }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "scheduler" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
@@ -1063,4 +1064,5 @@
"uid": "2e6b6a3b4bddf1427b3a55aa1311c656",
"version": 0
}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/statefulset.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/statefulset.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/statefulset.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/statefulset.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/workload-total.yaml packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/workload-total.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/dashboards-1.14/workload-total.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/dashboards-1.14/workload-total.yaml
@@ -8,7 +8,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
+ namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "workload-total" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/grafana/namespaces.yaml packages/rancher-monitoring/charts/templates/grafana/namespaces.yaml
--- packages/rancher-monitoring/charts-original/templates/grafana/namespaces.yaml
+++ packages/rancher-monitoring/charts/templates/grafana/namespaces.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.grafana.enabled }}
+{{- if or .Values.grafana.sidecar.dashboards.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
+ labels:
+ name: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
+{{- end }}
+---
+{{- if or .Values.grafana.sidecar.datasources.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.grafana.sidecar.datasources.searchNamespace }}
+ labels:
+ name: {{ .Values.grafana.sidecar.datasources.searchNamespace }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/prometheus.yaml packages/rancher-monitoring/charts/templates/prometheus/prometheus.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/prometheus.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/prometheus.yaml
@@ -32,7 +32,7 @@
{{ toYaml .Values.prometheus.prometheusSpec.apiserverConfig | indent 4}}
{{- end }}
{{- if .Values.prometheus.prometheusSpec.image }}
- baseImage: {{ .Values.prometheus.prometheusSpec.image.repository }}
+ baseImage: {{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.image.repository }}
version: {{ .Values.prometheus.prometheusSpec.image.tag }}
{{- if .Values.prometheus.prometheusSpec.image.sha }}
sha: {{ .Values.prometheus.prometheusSpec.image.sha }}
@@ -59,6 +59,9 @@
{{- else }}
externalUrl: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.prometheus.service.port }}
{{- end }}
+{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
+ ignoreNamespaceSelectors: {{ .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
+{{- end }}
{{- if .Values.prometheus.prometheusSpec.nodeSelector }}
nodeSelector:
{{ toYaml .Values.prometheus.prometheusSpec.nodeSelector | indent 4 }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules/etcd.yaml packages/rancher-monitoring/charts/templates/prometheus/rules/etcd.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules/etcd.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules/etcd.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd }}
+{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.etcd }}
+{{- if (include "exporter.kubeEtcd.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -152,4 +153,5 @@
for: 10m
labels:
severity: warning
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules/kube-scheduler.rules.yaml packages/rancher-monitoring/charts/templates/prometheus/rules/kube-scheduler.rules.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules/kube-scheduler.rules.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules/kube-scheduler.rules.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler }}
+{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -60,4 +61,5 @@
labels:
quantile: '0.5'
record: cluster_quantile:scheduler_binding_latency:histogram_quantile
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules/kubernetes-absent.yaml packages/rancher-monitoring/charts/templates/prometheus/rules/kubernetes-absent.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules/kubernetes-absent.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules/kubernetes-absent.yaml
@@ -58,7 +58,7 @@
labels:
severity: critical
{{- end }}
-{{- if .Values.kubeControllerManager.enabled }}
+{{- if (include "exporter.kubeControllerManager.enabled" .)}}
- alert: KubeControllerManagerDown
annotations:
message: KubeControllerManager has disappeared from Prometheus target discovery.
@@ -68,7 +68,7 @@
labels:
severity: critical
{{- end }}
-{{- if .Values.kubeScheduler.enabled }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
- alert: KubeSchedulerDown
annotations:
message: KubeScheduler has disappeared from Prometheus target discovery.
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/etcd.yaml packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/etcd.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/etcd.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/etcd.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.etcd }}
+{{- if (include "exporter.kubeEtcd.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -152,4 +153,5 @@
for: 10m
labels:
severity: warning
-{{- end }}
\ No newline at end of file
+{{- end }}
+{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -60,4 +61,5 @@
labels:
quantile: '0.5'
record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeControllerManager.enabled }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create }}
+{{- if (include "exporter.kubeControllerManager.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -24,7 +25,7 @@
groups:
- name: kubernetes-system-controller-manager
rules:
-{{- if .Values.kubeControllerManager.enabled }}
+{{- if (include "exporter.kubeControllerManager.enabled" .)}}
- alert: KubeControllerManagerDown
annotations:
message: KubeControllerManager has disappeared from Prometheus target discovery.
@@ -34,4 +35,5 @@
labels:
severity: critical
{{- end }}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml
@@ -4,7 +4,8 @@
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
-{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler }}
+{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
@@ -24,7 +25,7 @@
groups:
- name: kubernetes-system-scheduler
rules:
-{{- if .Values.kubeScheduler.enabled }}
+{{- if (include "exporter.kubeScheduler.enabled" .)}}
- alert: KubeSchedulerDown
annotations:
message: KubeScheduler has disappeared from Prometheus target discovery.
@@ -34,4 +35,5 @@
labels:
severity: critical
{{- end }}
+{{- end }}
{{- end }}
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml
@@ -32,9 +32,9 @@
containers:
- name: create
{{- if .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
- image: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
{{- else }}
- image: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}
{{- end }}
imagePullPolicy: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.pullPolicy }}
args:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml
@@ -32,9 +32,9 @@
containers:
- name: patch
{{- if .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
- image: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }}
{{- else }}
- image: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}
{{- end }}
imagePullPolicy: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.pullPolicy }}
args:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/cleanup-crds.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/cleanup-crds.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/cleanup-crds.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/cleanup-crds.yaml
@@ -1,49 +0,0 @@
-{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.cleanupCustomResource }}
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: {{ template "kube-prometheus-stack.fullname" . }}-operator-cleanup
- namespace: {{ template "kube-prometheus-stack.namespace" . }}
- annotations:
- "helm.sh/hook": pre-delete
- "helm.sh/hook-weight": "3"
- "helm.sh/hook-delete-policy": hook-succeeded
- labels:
- app: {{ template "kube-prometheus-stack.name" . }}-operator
-{{ include "kube-prometheus-stack.labels" . | indent 4 }}
-spec:
- template:
- metadata:
- name: {{ template "kube-prometheus-stack.fullname" . }}-operator-cleanup
- labels:
- app: {{ template "kube-prometheus-stack.name" . }}-operator
-{{ include "kube-prometheus-stack.labels" . | indent 8 }}
- spec:
- {{- if .Values.global.rbac.create }}
- serviceAccountName: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }}
- {{- end }}
- containers:
- - name: kubectl
- {{- if .Values.prometheusOperator.hyperkubeImage.sha }}
- image: {{ .Values.prometheusOperator.hyperkubeImage.repository }}:{{ .Values.prometheusOperator.hyperkubeImage.tag }}@sha256:{{ .Values.prometheusOperator.hyperkubeImage.sha }}
- {{- else }}
- image: "{{ .Values.prometheusOperator.hyperkubeImage.repository }}:{{ .Values.prometheusOperator.hyperkubeImage.tag }}"
- {{- end }}
- imagePullPolicy: "{{ .Values.prometheusOperator.hyperkubeImage.pullPolicy }}"
- command:
- - /bin/sh
- - -c
- - >
- kubectl delete alertmanager --all;
- kubectl delete prometheus --all;
- kubectl delete prometheusrule --all;
- kubectl delete servicemonitor --all;
- sleep 10;
- kubectl delete crd alertmanagers.monitoring.coreos.com;
- kubectl delete crd prometheuses.monitoring.coreos.com;
- kubectl delete crd prometheusrules.monitoring.coreos.com;
- kubectl delete crd servicemonitors.monitoring.coreos.com;
- kubectl delete crd podmonitors.monitoring.coreos.com;
- kubectl delete crd thanosrulers.monitoring.coreos.com;
- restartPolicy: OnFailure
-{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/clusterrole.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/clusterrole.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/clusterrole.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/clusterrole.yaml
@@ -7,7 +7,7 @@
app: {{ template "kube-prometheus-stack.name" . }}-operator
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
rules:
-{{- if or .Values.prometheusOperator.manageCrds .Values.prometheusOperator.cleanupCustomResource }}
+{{- if .Values.prometheusOperator.manageCrds }}
- apiGroups:
- apiextensions.k8s.io
resources:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/crds.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/crds.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/crds.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/crds.yaml
@@ -1,6 +0,0 @@
-{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}}
-{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }}
-{{ $.Files.Get $path }}
----
-{{- end }}
-{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus-operator/deployment.yaml packages/rancher-monitoring/charts/templates/prometheus-operator/deployment.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus-operator/deployment.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus-operator/deployment.yaml
@@ -33,9 +33,9 @@
containers:
- name: {{ template "kube-prometheus-stack.name" . }}
{{- if .Values.prometheusOperator.image.sha }}
- image: "{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}@sha256:{{ .Values.prometheusOperator.image.sha }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}@sha256:{{ .Values.prometheusOperator.image.sha }}"
{{- else }}
- image: "{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}"
+ image: "{{ template "system_default_registry" . }}{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}"
{{- end }}
imagePullPolicy: "{{ .Values.prometheusOperator.image.pullPolicy }}"
args:
@@ -64,14 +64,14 @@
- --logtostderr=true
- --localhost=127.0.0.1
{{- if .Values.prometheusOperator.prometheusConfigReloaderImage.sha }}
- - --prometheus-config-reloader={{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}@sha256:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.sha }}
+ - --prometheus-config-reloader={{ template "system_default_registry" . }}{{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}@sha256:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.sha }}
{{- else }}
- - --prometheus-config-reloader={{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}
+ - --prometheus-config-reloader={{ template "system_default_registry" . }}{{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}
{{- end }}
{{- if .Values.prometheusOperator.configmapReloadImage.sha }}
- - --config-reloader-image={{ .Values.prometheusOperator.configmapReloadImage.repository }}:{{ .Values.prometheusOperator.configmapReloadImage.tag }}@sha256:{{ .Values.prometheusOperator.configmapReloadImage.sha }}
+ - --config-reloader-image={{ template "system_default_registry" . }}{{ .Values.prometheusOperator.configmapReloadImage.repository }}:{{ .Values.prometheusOperator.configmapReloadImage.tag }}@sha256:{{ .Values.prometheusOperator.configmapReloadImage.sha }}
{{- else }}
- - --config-reloader-image={{ .Values.prometheusOperator.configmapReloadImage.repository }}:{{ .Values.prometheusOperator.configmapReloadImage.tag }}
+ - --config-reloader-image={{ template "system_default_registry" . }}{{ .Values.prometheusOperator.configmapReloadImage.repository }}:{{ .Values.prometheusOperator.configmapReloadImage.tag }}
{{- end }}
- --config-reloader-cpu={{ .Values.prometheusOperator.configReloaderCpu }}
- --config-reloader-memory={{ .Values.prometheusOperator.configReloaderMemory }}
@@ -89,9 +89,9 @@
{{- if .Values.prometheusOperator.tlsProxy.enabled }}
- name: tls-proxy
{{- if .Values.prometheusOperator.tlsProxy.image.sha }}
- image: {{ .Values.prometheusOperator.tlsProxy.image.repository }}:{{ .Values.prometheusOperator.tlsProxy.image.tag }}@sha256:{{ .Values.prometheusOperator.tlsProxy.image.sha }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.tlsProxy.image.repository }}:{{ .Values.prometheusOperator.tlsProxy.image.tag }}@sha256:{{ .Values.prometheusOperator.tlsProxy.image.sha }}
{{- else }}
- image: {{ .Values.prometheusOperator.tlsProxy.image.repository }}:{{ .Values.prometheusOperator.tlsProxy.image.tag }}
+ image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.tlsProxy.image.repository }}:{{ .Values.prometheusOperator.tlsProxy.image.tag }}
{{- end }}
imagePullPolicy: {{ .Values.prometheusOperator.tlsProxy.image.pullPolicy }}
args:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/rancher-monitoring/clusterrole.yaml packages/rancher-monitoring/charts/templates/rancher-monitoring/clusterrole.yaml
--- packages/rancher-monitoring/charts-original/templates/rancher-monitoring/clusterrole.yaml
+++ packages/rancher-monitoring/charts/templates/rancher-monitoring/clusterrole.yaml
@@ -0,0 +1,148 @@
+{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: monitoring-admin
+  labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
+    {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+    {{- end }}
+rules:
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - alertmanagers
+  - prometheuses
+  - prometheuses/finalizers
+  - alertmanagers/finalizers
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - thanosrulers
+  - thanosrulers/finalizers
+  - servicemonitors
+  - podmonitors
+  - prometheusrules
+  - podmonitors
+  verbs:
+  - '*'
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: monitoring-edit
+  labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
+    {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    {{- end }}
+rules:
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - alertmanagers
+  - prometheuses
+  - prometheuses/finalizers
+  - alertmanagers/finalizers
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - thanosrulers
+  - thanosrulers/finalizers
+  - servicemonitors
+  - podmonitors
+  - prometheusrules
+  - podmonitors
+  verbs:
+  - '*'
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: monitoring-view
+  labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
+    {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+    {{- end }}
+rules:
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - alertmanagers
+  - prometheuses
+  - prometheuses/finalizers
+  - alertmanagers/finalizers
+  - thanosrulers
+  - thanosrulers/finalizers
+  - servicemonitors
+  - podmonitors
+  - prometheusrules
+  - podmonitors
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+{{- if .Values.grafana.enabled }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-config-edit
+  labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-config-view
+  labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+{{- end }}
+{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/values.yaml packages/rancher-monitoring/charts/values.yaml
--- packages/rancher-monitoring/charts-original/values.yaml
+++ packages/rancher-monitoring/charts/values.yaml
@@ -2,13 +2,271 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
+# Rancher Monitoring Configuration
+
+## Configuration for prometheus-adapter
+## ref: https://github.com/helm/charts/tree/master/stable/prometheus-adapter
+##
+prometheus-adapter:
+ enabled: true
+ prometheus:
+ # Change this if you change the namespaceOverride or nameOverride of prometheus-operator
+ url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc
+ port: 9090
+ image:
+ repository: rancher/directxman12-k8s-prometheus-adapter-amd64
+ tag: v0.6.0
+ pullPolicy: IfNotPresent
+ pullSecrets: {}
+
+## RKE PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
+##
+rkeControllerManager:
+ enabled: false
+ metricsPort: 10252
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeScheduler:
+ enabled: false
+ metricsPort: 10251
+ component: kube-scheduler
+ clients:
+ port: 10012
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeProxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeEtcd:
+ enabled: false
+ metricsPort: 2379
+ component: kube-etcd
+ clients:
+ port: 10014
+ https:
+ enabled: true
+ certDir: /etc/kubernetes/ssl
+ certFile: kube-etcd-*.pem
+ keyFile: kube-etcd-*-key.pem
+ caCertFile: kube-ca.pem
+ nodeSelector:
+ node-role.kubernetes.io/etcd: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+## k3s PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
+##
+k3sControllerManager:
+ enabled: false
+ metricsPort: 10252
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+k3sScheduler:
+ enabled: false
+ metricsPort: 10251
+ component: kube-scheduler
+ clients:
+ port: 10012
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+k3sProxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+## KubeADM PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
+##
+kubeAdmControllerManager:
+ enabled: false
+ metricsPort: 10257
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmScheduler:
+ enabled: false
+ metricsPort: 10259
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmProxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmEtcd:
+ enabled: false
+ metricsPort: 2381
+ component: kube-etcd
+ clients:
+ port: 10014
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+## rke2 PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
+##
+rke2ControllerManager:
+ enabled: false
+ metricsPort: 10252
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Scheduler:
+ enabled: false
+ metricsPort: 10251
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Proxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Etcd:
+ enabled: false
+ metricsPort: 2381
+ component: kube-etcd
+ clients:
+ port: 10014
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/etcd: "true"
+ tolerations:
+ - effect: "NoSchedule"
+ key: node-role.kubernetes.io/master
+ operator: "Equal"
+
+# Prometheus Operator Configuration
+
## Provide a name in place of kube-prometheus-stack for `app:` labels
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-nameOverride: ""
+nameOverride: "rancher-monitoring"
## Override the deployment namespace
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-namespaceOverride: ""
+namespaceOverride: "cattle-monitoring-system"
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
@@ -76,8 +334,19 @@
##
global:
+ cattle:
+ systemDefaultRegistry: ""
rbac:
+ ## Create RBAC resources for ServiceAccounts and users
+ ##
create: true
+
+ userRoles:
+ ## Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets
+ create: true
+ ## Aggregate default user ClusterRoles into default k8s ClusterRoles
+ aggregateToDefaultRoles: true
+
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
@@ -130,6 +399,22 @@
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
## https://prometheus.io/webtools/alerting/routing-tree-editor/
##
+ ## Example Slack Config
+ ## config:
+ ## route:
+ ## group_by: ['job']
+ ## group_wait: 30s
+ ## group_interval: 5m
+ ## repeat_interval: 3h
+ ## receiver: 'slack-notifications'
+ ## receivers:
+ ## - name: 'slack-notifications'
+ ## slack_configs:
+ ## - send_resolved: true
+ ## text: '{{ template "slack.rancher.text" . }}'
+ ## api_url: <slack-webhook-url-here>
+ ## templates:
+ ## - /etc/alertmanager/config/*.tmpl
config:
global:
resolve_timeout: 5m
@@ -145,6 +430,8 @@
receiver: 'null'
receivers:
- name: 'null'
+ templates:
+ - /etc/alertmanager/config/*.tmpl
## Pass the Alertmanager configuration directives through Helm's templating
## engine. If the Alertmanager configuration contains Alertmanager templates,
@@ -160,25 +447,76 @@
## ref: https://prometheus.io/docs/alerting/notifications/
## https://prometheus.io/docs/alerting/notification_examples/
##
- templateFiles: {}
- #
- ## An example template:
- # template_1.tmpl: |-
- # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
- #
- # {{ define "slack.myorg.text" }}
- # {{- $root := . -}}
- # {{ range .Alerts }}
- # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
- # *Cluster:* {{ template "cluster" $root }}
- # *Description:* {{ .Annotations.description }}
- # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
- # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
- # *Details:*
- # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- # {{ end }}
- # {{ end }}
- # {{ end }}
+ templateFiles:
+ rancher_defaults.tmpl: |-
+ {{- define "slack.rancher.text" -}}
+ {{ template "rancher.text_multiple" . }}
+ {{- end -}}
+
+ {{- define "rancher.text_multiple" -}}
+ *[GROUP - Details]*
+ One or more alarms in this group have triggered a notification.
+
+ {{- if gt (len .GroupLabels.Values) 0 }}
+ *Group Labels:*
+ {{- range .GroupLabels.SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- if .ExternalURL }}
+ *Link to AlertManager:* {{ .ExternalURL }}
+ {{- end }}
+
+ {{- range .Alerts }}
+ {{ template "rancher.text_single" . }}
+ {{- end }}
+ {{- end -}}
+
+ {{- define "rancher.text_single" -}}
+ {{- if .Labels.alertname }}
+ *[ALERT - {{ .Labels.alertname }}]*
+ {{- else }}
+ *[ALERT]*
+ {{- end }}
+ {{- if .Labels.severity }}
+ *Severity:* `{{ .Labels.severity }}`
+ {{- end }}
+ {{- if .Labels.cluster }}
+ *Cluster:* {{ .Labels.cluster }}
+ {{- end }}
+ {{- if .Annotations.summary }}
+ *Summary:* {{ .Annotations.summary }}
+ {{- end }}
+ {{- if .Annotations.message }}
+ *Message:* {{ .Annotations.message }}
+ {{- end }}
+ {{- if .Annotations.description }}
+ *Description:* {{ .Annotations.description }}
+ {{- end }}
+ {{- if .Annotations.runbook_url }}
+ *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:>
+ {{- end }}
+ {{- with .Labels }}
+ {{- with .Remove (stringSlice "alertname" "severity" "cluster") }}
+ {{- if gt (len .) 0 }}
+ *Additional Labels:*
+ {{- range .SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Annotations }}
+ {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }}
+ {{- if gt (len .) 0 }}
+ *Additional Annotations:*
+ {{- range .SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end -}}
ingress:
enabled: false
@@ -208,6 +546,21 @@
## Configuration for Alertmanager secret
##
secret:
+
+ # Should the Alertmanager Config Secret be cleaned up on an uninstall?
+ # This is set to false by default to prevent the loss of alerting configuration on an uninstall
+  # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+ #
+ cleanupOnUninstall: false
+
+ # The image used to manage the Alertmanager Config Secret's lifecycle
+  # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+ #
+ image:
+ repository: rancher/rancher-agent
+ tag: v2.4.8
+ pullPolicy: IfNotPresent
+
annotations: {}
## Configuration for creating an Ingress that will map to each Alertmanager replica service
@@ -334,7 +687,7 @@
## Image of Alertmanager
##
image:
- repository: quay.io/prometheus/alertmanager
+ repository: rancher/prom-alertmanager
tag: v0.21.0
sha: ""
@@ -410,9 +763,13 @@
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
- resources: {}
- # requests:
- # memory: 400Mi
+ resources:
+ limits:
+ memory: 500Mi
+ cpu: 1000m
+ requests:
+ memory: 100Mi
+ cpu: 100m
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
@@ -487,6 +844,9 @@
enabled: true
namespaceOverride: ""
+ deploymentStrategy:
+ type: Recreate
+
## Deploy default dashboards.
##
defaultDashboardsEnabled: true
@@ -530,6 +890,7 @@
dashboards:
enabled: true
label: grafana_dashboard
+ searchNamespace: grafana-dashboards
## Annotations for Grafana dashboard configmaps
##
@@ -548,6 +909,7 @@
## ref: https://git.io/fjaBS
createPrometheusReplicasDatasources: false
label: grafana_datasource
+ searchNamespace: grafana-datasources
extraConfigmapMounts: []
# - name: certs-configmap
@@ -575,6 +937,19 @@
##
service:
portName: service
+ ## Port for Grafana Service to listen on
+ ##
+ port: 80
+ ## To be used with a proxy extraContainer port
+ ##
+ targetPort: 3000
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30950
+ ## Service type
+ ##
+ type: ClusterIP
## If true, create a serviceMonitor for grafana
##
@@ -600,6 +975,14 @@
# targetLabel: nodename
# replacement: $1
# action: replace
+
+ resources:
+ limits:
+ memory: 200Mi
+ cpu: 200m
+ requests:
+ memory: 100Mi
+ cpu: 100m
## Component scraping the kube api server
##
@@ -756,7 +1139,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:
- enabled: true
+ enabled: false
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -889,7 +1272,7 @@
## Component scraping etcd
##
kubeEtcd:
- enabled: true
+ enabled: false
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -949,7 +1332,7 @@
## Component scraping kube scheduler
##
kubeScheduler:
- enabled: true
+ enabled: false
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -1002,7 +1385,7 @@
## Component scraping kube proxy
##
kubeProxy:
- enabled: true
+ enabled: false
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1076,6 +1459,13 @@
create: true
podSecurityPolicy:
enabled: true
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 130Mi
## Deploy node exporter as a daemonset to all nodes
##
@@ -1125,6 +1515,16 @@
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
+ service:
+ port: 9796
+ targetPort: 9796
+ resources:
+ limits:
+ cpu: 200m
+ memory: 50Mi
+ requests:
+ cpu: 100m
+ memory: 30Mi
## Manages Prometheus and Alertmanager components
##
@@ -1138,7 +1538,7 @@
tlsProxy:
enabled: true
image:
- repository: squareup/ghostunnel
+ repository: rancher/squareup-ghostunnel
tag: v1.5.2
sha: ""
pullPolicy: IfNotPresent
@@ -1156,7 +1556,7 @@
patch:
enabled: true
image:
- repository: jettech/kube-webhook-certgen
+ repository: rancher/jettech-kube-webhook-certgen
tag: v1.2.1
sha: ""
pullPolicy: IfNotPresent
@@ -1285,13 +1685,13 @@
## Resource limits & requests
##
- resources: {}
- # limits:
- # cpu: 200m
- # memory: 200Mi
- # requests:
- # cpu: 100m
- # memory: 100Mi
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1335,7 +1735,7 @@
## Prometheus-operator image
##
image:
- repository: quay.io/coreos/prometheus-operator
+ repository: rancher/coreos-prometheus-operator
tag: v0.38.1
sha: ""
pullPolicy: IfNotPresent
@@ -1343,14 +1743,14 @@
## Configmap-reload image to use for reloading configmaps
##
configmapReloadImage:
- repository: docker.io/jimmidyson/configmap-reload
+ repository: rancher/jimmidyson-configmap-reload
tag: v0.3.0
sha: ""
## Prometheus-config-reloader image to use for config and rule reloading
##
prometheusConfigReloaderImage:
- repository: quay.io/coreos/prometheus-config-reloader
+ repository: rancher/coreos-prometheus-config-reloader
tag: v0.38.1
sha: ""
@@ -1366,14 +1766,6 @@
##
secretFieldSelector: ""
- ## Hyperkube image to use when cleaning up
- ##
- hyperkubeImage:
- repository: k8s.gcr.io/hyperkube
- tag: v1.16.12
- sha: ""
- pullPolicy: IfNotPresent
-
## Deploy a Prometheus instance
##
prometheus:
@@ -1614,7 +2006,7 @@
## Image of Prometheus.
##
image:
- repository: quay.io/prometheus/prometheus
+ repository: rancher/prom-prometheus
tag: v2.18.2
sha: ""
@@ -1666,6 +2058,11 @@
##
externalUrl: ""
+ ## Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs
+ ## If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into
+ ##
+ ignoreNamespaceSelectors: true
+
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
@@ -1698,7 +2095,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##
- ruleSelectorNilUsesHelmValues: true
+ ruleSelectorNilUsesHelmValues: false
## PrometheusRules to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1723,7 +2120,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##
- serviceMonitorSelectorNilUsesHelmValues: true
+ serviceMonitorSelectorNilUsesHelmValues: false
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1743,7 +2140,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##
- podMonitorSelectorNilUsesHelmValues: true
+ podMonitorSelectorNilUsesHelmValues: false
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
@@ -1840,9 +2237,13 @@
## Resource limits & requests
##
- resources: {}
- # requests:
- # memory: 400Mi
+ resources:
+ limits:
+ memory: 1500Mi
+ cpu: 1000m
+ requests:
+ memory: 750Mi
+ cpu: 750m
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md