(dev-v2.6-archive) Support monitoring in hardened RKE clusters

This commit adds support for deploying rancher-monitoring into hardened clusters.

It modifies some of the default securityContexts and makes miscellaneous fixes, such as:
- Removing default AppArmor PSP annotations from Grafana (related to https://github.com/helm/charts/issues/9090)
- Modifying rkeScheduler and rkeControllerManager to scrape components over localhost, since their endpoints aren't exposed in a hardened cluster (see the values sketch below)
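
For reference, a minimal sketch of the pushprox client values this relies on (key names taken from the values.yaml diff below; the full default blocks also set tolerations and images):

  rkeControllerManager:
    clients:
      port: 10011
      useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/controlplane: "true"
  rkeScheduler:
    clients:
      port: 10012
      useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/controlplane: "true"

With useLocalhost set, the client pods (which run with hostNetwork on the control-plane nodes) scrape the component on 127.0.0.1 and forward the metrics to the proxy, so Prometheus never needs to reach the unexposed endpoint directly.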

These changes have been verified on a hardened RKE cluster.

(partially cherry picked from commit e3d6033572)
Arvind Iyengar 2020-09-20 17:11:48 -07:00
parent 0eded521bd
commit ae60d235cb
5 changed files with 131 additions and 122 deletions


@@ -38,3 +38,4 @@ All notable changes from the upstream Prometheus Operator chart will be added to
- Modified `<serviceMonitor|podMonitor|rule>SelectorNilUsesHelmValues` to default to `false`. As a result, we look for all CRs with any labels in all namespaces by default, rather than just the ones tagged with the label `release: rancher-monitoring`.
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. The benefit of this approach is that changes made to the Config Secret on a live cluster will never be overridden on a `helm upgrade`, since the secret is only created on a `helm install`. If you would like the secret to be cleaned up on a `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; this is disabled by default to prevent the loss of alerting configuration on an uninstall.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": true, "runAsUser": 1000}` and set `grafana.rbac.pspUseAppArmor=false` in order to make it possible to deploy this chart on a hardened cluster without AppArmor installed.
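
For illustration, the hardened-cluster defaults described above correspond to values along these lines (a partial sketch assembled from the values.yaml changes in this commit, not the complete default file):

  grafana:
    rbac:
      pspUseAppArmor: false
  alertmanager:
    secret:
      cleanupOnUninstall: false
      securityContext:
        runAsNonRoot: true
        runAsUser: 1000

Pod-level securityContexts elsewhere in the chart are similarly pinned to runAsNonRoot: true with a non-root UID (1000, or a component's own UID such as Grafana's 472), so no workload requires root or AppArmor to schedule.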


@@ -196,6 +196,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/grafana/values.yaml packages/rancher-monitoring/charts/charts/grafana/values.yaml
--- packages/rancher-monitoring/charts-original/charts/grafana/values.yaml
+++ packages/rancher-monitoring/charts/charts/grafana/values.yaml
@@ -1,7 +1,7 @@
rbac:
create: true
pspEnabled: true
- pspUseAppArmor: true
+ pspUseAppArmor: false
namespaced: false
extraRoleRules: []
# - apiGroups: []
@@ -49,7 +49,7 @@
# schedulerName: "default-scheduler"
@@ -205,7 +214,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
tag: 7.1.5
sha: ""
pullPolicy: IfNotPresent
@@ -63,12 +63,15 @@
testFramework:
enabled: true
@@ -213,8 +222,17 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
+ image: "rancher/bats-bats"
tag: "v1.1.0"
imagePullPolicy: IfNotPresent
- securityContext: {}
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
securityContext:
+ runAsNonRoot: true
runAsUser: 472
runAsGroup: 472
fsGroup: 472
@@ -91,7 +94,7 @@
# priorityClassName:
downloadDashboardsImage:
@@ -223,7 +241,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
tag: 7.70.0
sha: ""
pullPolicy: IfNotPresent
@@ -244,7 +247,7 @@
## initChownData container image
##
image:
@@ -232,7 +250,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
@@ -486,7 +489,7 @@
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image:
@@ -244,7 +262,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/kube-state-metrics/templates/deployment.yaml packages/rancher-monitoring/charts/charts/kube-state-metrics/templates/deployment.yaml
--- packages/rancher-monitoring/charts-original/charts/kube-state-metrics/templates/deployment.yaml
+++ packages/rancher-monitoring/charts/charts/kube-state-metrics/templates/deployment.yaml
@@ -44,6 +44,7 @@
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
+ runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
@@ -154,7 +155,7 @@
- --pod-namespace=$(POD_NAMESPACE)
{{ end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
@@ -265,6 +291,14 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
tag: v1.9.7
pullPolicy: IfNotPresent
@@ -73,6 +73,7 @@
securityContext:
enabled: true
+ runAsNonRoot: true
runAsGroup: 65534
runAsUser: 65534
fsGroup: 65534
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-adapter/README.md packages/rancher-monitoring/charts/charts/prometheus-adapter/README.md
--- packages/rancher-monitoring/charts-original/charts/prometheus-adapter/README.md
+++ packages/rancher-monitoring/charts/charts/prometheus-adapter/README.md
@@ -301,6 +335,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
tag: v0.6.0
pullPolicy: IfNotPresent
@@ -139,3 +139,7 @@
# API server unable to communicate with metrics-server. As an example, this is required
# if you use Weave network on EKS
enabled: false
+
+securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
\ No newline at end of file
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/OWNERS packages/rancher-monitoring/charts/charts/prometheus-node-exporter/OWNERS
--- packages/rancher-monitoring/charts-original/charts/prometheus-node-exporter/OWNERS
+++ packages/rancher-monitoring/charts/charts/prometheus-node-exporter/OWNERS
@@ -577,7 +620,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/alertmanager/cleanupSecret.yaml packages/rancher-monitoring/charts/templates/alertmanager/cleanupSecret.yaml
--- packages/rancher-monitoring/charts-original/templates/alertmanager/cleanupSecret.yaml
+++ packages/rancher-monitoring/charts/templates/alertmanager/cleanupSecret.yaml
@@ -0,0 +1,86 @@
+{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) (.Values.alertmanager.secret.cleanupOnUninstall) }}
+apiVersion: batch/v1
+kind: Job
@@ -599,6 +642,10 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ spec:
+ serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete
+{{- if .Values.alertmanager.secret.securityContext }}
+ securityContext:
+{{ toYaml .Values.alertmanager.secret.securityContext | indent 8 }}
+{{- end }}
+ containers:
+ - name: delete-secret
+ image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }}
@@ -686,7 +733,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
{{ toYaml .Values.alertmanager.secret.annotations | indent 4 }}
{{- end }}
labels:
@@ -20,4 +28,97 @@
{{- range $key, $val := .Values.alertmanager.templateFiles }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end }}
@@ -711,6 +758,10 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
+ app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
+ spec:
+ serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install
+{{- if .Values.alertmanager.secret.securityContext }}
+ securityContext:
+{{ toYaml .Values.alertmanager.secret.securityContext | indent 8 }}
+{{- end }}
+ containers:
+ - name: copy-pre-install-secret
+ image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }}
@@ -1728,7 +1779,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/values.yaml packages/rancher-monitoring/charts/values.yaml
--- packages/rancher-monitoring/charts-original/values.yaml
+++ packages/rancher-monitoring/charts/values.yaml
@@ -2,13 +2,273 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
@@ -1758,6 +1809,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
@@ -1772,6 +1824,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
@@ -2002,7 +2055,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
@@ -76,8 +336,19 @@
##
global:
@@ -2022,7 +2075,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
@@ -130,6 +401,22 @@
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
## https://prometheus.io/webtools/alerting/routing-tree-editor/
##
@@ -2045,7 +2098,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
config:
global:
resolve_timeout: 5m
@@ -145,6 +432,8 @@
receiver: 'null'
receivers:
- name: 'null'
@@ -2054,7 +2107,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Pass the Alertmanager configuration directives through Helm's templating
## engine. If the Alertmanager configuration contains Alertmanager templates,
@@ -160,25 +449,76 @@
## ref: https://prometheus.io/docs/alerting/notifications/
## https://prometheus.io/docs/alerting/notification_examples/
##
@@ -2150,7 +2203,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
ingress:
enabled: false
@@ -208,6 +548,25 @@
## Configuration for Alertmanager secret
##
secret:
@@ -2168,11 +2221,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
+ repository: rancher/rancher-agent
+ tag: v2.4.8
+ pullPolicy: IfNotPresent
+
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+
annotations: {}
## Configuration for creating an Ingress that will map to each Alertmanager replica service
@@ -334,7 +693,7 @@
## Image of Alertmanager
##
image:
@@ -2181,7 +2238,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v0.21.0
sha: ""
@@ -410,9 +769,13 @@
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -2198,7 +2255,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
@@ -487,6 +850,9 @@
enabled: true
namespaceOverride: ""
@@ -2208,7 +2265,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Deploy default dashboards.
##
defaultDashboardsEnabled: true
@@ -530,6 +896,7 @@
dashboards:
enabled: true
label: grafana_dashboard
@@ -2216,7 +2273,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Annotations for Grafana dashboard configmaps
##
@@ -575,6 +942,19 @@
##
service:
portName: service
@@ -2236,7 +2293,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If true, create a serviceMonitor for grafana
##
@@ -600,6 +980,14 @@
# targetLabel: nodename
# replacement: $1
# action: replace
@@ -2251,7 +2308,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Component scraping the kube api server
##
@@ -756,7 +1144,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:
@@ -2260,7 +2317,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -889,7 +1277,7 @@
## Component scraping etcd
##
kubeEtcd:
@@ -2269,7 +2326,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -949,7 +1337,7 @@
## Component scraping kube scheduler
##
kubeScheduler:
@@ -2278,7 +2335,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -1002,7 +1390,7 @@
## Component scraping kube proxy
##
kubeProxy:
@@ -2287,7 +2344,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1076,6 +1464,13 @@
create: true
podSecurityPolicy:
enabled: true
@@ -2301,7 +2358,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Deploy node exporter as a daemonset to all nodes
##
@@ -1125,6 +1520,16 @@
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
@@ -2318,7 +2375,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Manages Prometheus and Alertmanager components
##
@@ -1138,7 +1543,7 @@
tlsProxy:
enabled: true
image:
@@ -2327,7 +2384,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v1.5.2
sha: ""
pullPolicy: IfNotPresent
@@ -1156,7 +1561,7 @@
patch:
enabled: true
image:
@@ -2336,7 +2393,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v1.2.1
sha: ""
pullPolicy: IfNotPresent
@@ -1285,13 +1690,13 @@
## Resource limits & requests
##
@@ -2357,7 +2414,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1335,7 +1740,7 @@
## Prometheus-operator image
##
image:
@@ -2366,7 +2423,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v0.38.1
sha: ""
pullPolicy: IfNotPresent
@@ -1343,14 +1748,14 @@
## Configmap-reload image to use for reloading configmaps
##
configmapReloadImage:
@@ -2383,7 +2440,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v0.38.1
sha: ""
@@ -1366,14 +1771,6 @@
##
secretFieldSelector: ""
@@ -2398,7 +2455,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Deploy a Prometheus instance
##
prometheus:
@@ -1614,7 +2011,7 @@
## Image of Prometheus.
##
image:
@@ -2407,7 +2464,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v2.18.2
sha: ""
@@ -1666,6 +2063,11 @@
##
externalUrl: ""
@@ -2419,7 +2476,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
@@ -1698,7 +2100,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##
@@ -2428,7 +2485,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## PrometheusRules to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1723,7 +2125,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##
@@ -2437,7 +2494,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1743,7 +2145,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##
@@ -2446,7 +2503,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
@@ -1840,9 +2242,13 @@
## Resource limits & requests
##


@@ -1,74 +0,0 @@
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.client.name" . }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
readOnlyRootFilesystem: false
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumes:
- 'emptyDir'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }}
readOnly: true
{{- end }}
{{- end }}{{- end }}


@@ -12,7 +12,7 @@ spec:
template:
metadata:
labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
spec:
{{- if .Values.clients.nodeSelector }}
nodeSelector: {{ toYaml .Values.clients.nodeSelector | nindent 8 }}
{{- end }}
@@ -21,7 +21,9 @@ spec:
{{- end }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
serviceAccountName: {{ template "pushProxy.client.name" . }}
{{- end }}
containers:
- name: pushprox-client
image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }}
@@ -59,9 +61,6 @@ spec:
value: :{{ .Values.clients.port }}
- name: PROXY_URL
value: {{ template "pushProxy.proxyUrl" . }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumeMounts:
- name: metrics-cert-dir
@@ -115,12 +114,9 @@ spec:
value: /etc/ssl/push-proxy/push-proxy-key.pem
- name: CACERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem
securityContext:
runAsNonRoot: false
volumeMounts:
- name: metrics-cert-dir-source
mountPath: /etc/source
readOnly: true
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
volumes:
@@ -131,4 +127,36 @@ spec:
- name: metrics-cert-dir
emptyDir: {}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
{{- end }}
{{- end }}{{- end }}


@@ -13,9 +13,6 @@ spec:
metadata:
labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }}
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if .Values.proxy.nodeSelector }}
nodeSelector: {{ toYaml .Values.proxy.nodeSelector | nindent 8 }}
{{- end }}