mirror of https://git.rancher.io/charts
--- charts-original/values.yaml
+++ charts/values.yaml
@@ -2,18 +2,432 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

+# Rancher Monitoring Configuration
+
+## Configuration for prometheus-adapter
+## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter
+##
+prometheus-adapter:
+  enabled: true
+  prometheus:
+    # Change this if you change the namespaceOverride or nameOverride of prometheus-operator
+    url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc
+    port: 9090
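+    # Illustrative example (hypothetical override values, not chart defaults):
+    # if nameOverride were "my-monitoring" and namespaceOverride were "my-ns",
+    # this URL would follow the same pattern and typically become:
+    # url: http://my-monitoring-prometheus.my-ns.svc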
+  psp:
+    create: true
+
+## RKE PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+rkeControllerManager:
+  enabled: false
+  metricsPort: 10252
+  component: kube-controller-manager
+  clients:
+    port: 10011
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/controlplane: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rkeScheduler:
+  enabled: false
+  metricsPort: 10251
+  component: kube-scheduler
+  clients:
+    port: 10012
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/controlplane: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rkeProxy:
+  enabled: false
+  metricsPort: 10249
+  component: kube-proxy
+  clients:
+    port: 10013
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rkeEtcd:
+  enabled: false
+  metricsPort: 2379
+  component: kube-etcd
+  clients:
+    port: 10014
+    https:
+      enabled: true
+      certDir: /etc/kubernetes/ssl
+      certFile: kube-etcd-*.pem
+      keyFile: kube-etcd-*-key.pem
+      caCertFile: kube-ca.pem
+    nodeSelector:
+      node-role.kubernetes.io/etcd: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rkeIngressNginx:
+  enabled: false
+  metricsPort: 10254
+  component: ingress-nginx
+  clients:
+    port: 10015
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+    nodeSelector:
+      node-role.kubernetes.io/worker: "true"
+
+## k3s PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+k3sServer:
+  enabled: false
+  metricsPort: 10250
+  component: k3s-server
+  clients:
+    port: 10013
+    useLocalhost: true
+    https:
+      enabled: true
+      useServiceAccountCredentials: true
+      insecureSkipVerify: true
+    rbac:
+      additionalRules:
+        - nonResourceURLs: ["/metrics/cadvisor"]
+          verbs: ["get"]
+        - apiGroups: [""]
+          resources: ["nodes/metrics"]
+          verbs: ["get"]
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+  serviceMonitor:
+    endpoints:
+      - port: metrics
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+      - port: metrics
+        path: /metrics/cadvisor
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+      - port: metrics
+        path: /metrics/probes
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+
+## KubeADM PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+kubeAdmControllerManager:
+  enabled: false
+  metricsPort: 10257
+  component: kube-controller-manager
+  clients:
+    port: 10011
+    useLocalhost: true
+    https:
+      enabled: true
+      useServiceAccountCredentials: true
+      insecureSkipVerify: true
+    nodeSelector:
+      node-role.kubernetes.io/master: ""
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+kubeAdmScheduler:
+  enabled: false
+  metricsPort: 10259
+  component: kube-scheduler
+  clients:
+    port: 10012
+    useLocalhost: true
+    https:
+      enabled: true
+      useServiceAccountCredentials: true
+      insecureSkipVerify: true
+    nodeSelector:
+      node-role.kubernetes.io/master: ""
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+kubeAdmProxy:
+  enabled: false
+  metricsPort: 10249
+  component: kube-proxy
+  clients:
+    port: 10013
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+kubeAdmEtcd:
+  enabled: false
+  metricsPort: 2381
+  component: kube-etcd
+  clients:
+    port: 10014
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/master: ""
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+## rke2 PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+rke2ControllerManager:
+  enabled: false
+  metricsPort: 10252
+  component: kube-controller-manager
+  clients:
+    port: 10011
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/master: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rke2Scheduler:
+  enabled: false
+  metricsPort: 10251
+  component: kube-scheduler
+  clients:
+    port: 10012
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/master: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rke2Proxy:
+  enabled: false
+  metricsPort: 10249
+  component: kube-proxy
+  clients:
+    port: 10013
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rke2Etcd:
+  enabled: false
+  metricsPort: 2381
+  component: kube-etcd
+  clients:
+    port: 10014
+    useLocalhost: true
+    nodeSelector:
+      node-role.kubernetes.io/etcd: "true"
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+rke2IngressNginx:
+  enabled: false
+  metricsPort: 10254
+  component: ingress-nginx
+  clients:
+    port: 10015
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: "app.kubernetes.io/component"
+                  operator: "In"
+                  values:
+                    - "controller"
+            topologyKey: "kubernetes.io/hostname"
+            namespaces:
+              - "kube-system"
+  # In an RKE2 cluster, the ingress-nginx-controller is deployed as
+  # a Deployment with 1 pod when the RKE2 version is <= 1.20, and as
+  # a DaemonSet when the RKE2 version is >= 1.21 (see the example note below)
+  deployment:
+    enabled: false
+    replicas: 1
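+  # For example (illustrative only): on an RKE2 cluster at version <= 1.20,
+  # where the controller runs as a Deployment, this block would likely be:
+  #   deployment:
+  #     enabled: true
+  #     replicas: 1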
+
+
+
+## Additional PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+
+# hardenedKubelet can only be deployed if kubelet.enabled=true
+# If enabled, it replaces the ServiceMonitor deployed by the default kubelet option with a
+# PushProx-based exporter that does not require a host port to be open to scrape metrics.
+hardenedKubelet:
+  enabled: false
+  metricsPort: 10250
+  component: kubelet
+  clients:
+    port: 10015
+    useLocalhost: true
+    https:
+      enabled: true
+      useServiceAccountCredentials: true
+      insecureSkipVerify: true
+    rbac:
+      additionalRules:
+        - nonResourceURLs: ["/metrics/cadvisor"]
+          verbs: ["get"]
+        - apiGroups: [""]
+          resources: ["nodes/metrics"]
+          verbs: ["get"]
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+  serviceMonitor:
+    endpoints:
+      - port: metrics
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+      - port: metrics
+        path: /metrics/cadvisor
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+      - port: metrics
+        path: /metrics/probes
+        honorLabels: true
+        relabelings:
+          - sourceLabels: [__metrics_path__]
+            targetLabel: metrics_path
+
+# hardenedNodeExporter can only be deployed if nodeExporter.enabled=true
+# If enabled, it replaces the ServiceMonitor deployed by the default nodeExporter with a
+# PushProx-based exporter that does not require a host port to be open to scrape metrics.
+hardenedNodeExporter:
+  enabled: false
+  metricsPort: 9796
+  component: node-exporter
+  clients:
+    port: 10016
+    useLocalhost: true
+    tolerations:
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+
+## Component scraping nginx-ingress-controller
+##
+ingressNginx:
+  enabled: false
+
+  ## The namespace to search for your nginx-ingress-controller
+  ##
+  namespace: ingress-nginx
+
+  service:
+    port: 9913
+    targetPort: 10254
+    # selector:
+    #   app: ingress-nginx
+  serviceMonitor:
+    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+    ##
+    interval: ""
+
+    ## metric relabel configs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    # relabel configs to apply to samples before ingestion.
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+# Prometheus Operator Configuration
+
## Provide a name in place of kube-prometheus-stack for `app:` labels
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-nameOverride: ""
+nameOverride: "rancher-monitoring"

## Override the deployment namespace
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-namespaceOverride: ""
+namespaceOverride: "cattle-monitoring-system"

## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
kubeTargetVersionOverride: ""

+## Allow kubeVersion to be overridden while creating the ingress
+##
+kubeVersionOverride: ""
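+# For example (hypothetical value): to render the ingress as if the cluster
+# were running Kubernetes 1.20, you might set:
+# kubeVersionOverride: "1.20.0"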
+
## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""
@@ -89,8 +503,32 @@

##
global:
+  cattle:
+    systemDefaultRegistry: ""
+    ## Windows Monitoring
+    ## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-windows-exporter
+    ##
+    ## Deploys a DaemonSet of Prometheus exporters based on https://github.com/prometheus-community/windows_exporter.
+    ## Every Windows host must have a wins version of 0.1.0+ to use this chart (default as of Rancher 2.5.8).
+    ## To upgrade wins versions on Windows hosts, see https://github.com/rancher/wins/tree/master/charts/rancher-wins-upgrader.
+    ##
+    windows:
+      enabled: false
+  kubectl:
+    repository: rancher/kubectl
+    tag: v1.20.2
+    pullPolicy: IfNotPresent
  rbac:
+    ## Create RBAC resources for ServiceAccounts and users
+    ##
    create: true
+
+    userRoles:
+      ## Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets
+      create: true
+      ## Aggregate default user ClusterRoles into default k8s ClusterRoles
+      aggregateToDefaultRoles: true
+
    pspEnabled: true
    pspAnnotations: {}
      ## Specify pod annotations
@@ -117,6 +555,10 @@
  ##
  enabled: true

+  ## Annotations for Alertmanager
+  ##
+  annotations: {}
+
  ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2
  ##
  apiVersion: v2
@@ -143,6 +585,22 @@
  ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
  ## https://prometheus.io/webtools/alerting/routing-tree-editor/
  ##
+  ## Example Slack Config
+  ## config:
+  ##   route:
+  ##     group_by: ['job']
+  ##     group_wait: 30s
+  ##     group_interval: 5m
+  ##     repeat_interval: 3h
+  ##     receiver: 'slack-notifications'
+  ##   receivers:
+  ##   - name: 'slack-notifications'
+  ##     slack_configs:
+  ##       - send_resolved: true
+  ##         text: '{{ template "slack.rancher.text" . }}'
+  ##         api_url: <slack-webhook-url-here>
+  ##   templates:
+  ##     - /etc/alertmanager/config/*.tmpl
  config:
    global:
      resolve_timeout: 5m
@@ -179,25 +637,76 @@
  ## ref: https://prometheus.io/docs/alerting/notifications/
  ## https://prometheus.io/docs/alerting/notification_examples/
  ##
-  templateFiles: {}
-  #
-  ## An example template:
-  # template_1.tmpl: |-
-  #      {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
-  #
-  #      {{ define "slack.myorg.text" }}
-  #      {{- $root := . -}}
-  #      {{ range .Alerts }}
-  #        *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
-  #        *Cluster:* {{ template "cluster" $root }}
-  #        *Description:* {{ .Annotations.description }}
-  #        *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
-  #        *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
-  #        *Details:*
-  #          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
-  #          {{ end }}
-  #      {{ end }}
-  #      {{ end }}
+  templateFiles:
+    rancher_defaults.tmpl: |-
+      {{- define "slack.rancher.text" -}}
+      {{ template "rancher.text_multiple" . }}
+      {{- end -}}
+
+      {{- define "rancher.text_multiple" -}}
+      *[GROUP - Details]*
+      One or more alarms in this group have triggered a notification.
+
+      {{- if gt (len .GroupLabels.Values) 0 }}
+      *Group Labels:*
+      {{- range .GroupLabels.SortedPairs }}
+      • *{{ .Name }}:* `{{ .Value }}`
+      {{- end }}
+      {{- end }}
+      {{- if .ExternalURL }}
+      *Link to AlertManager:* {{ .ExternalURL }}
+      {{- end }}
+
+      {{- range .Alerts }}
+      {{ template "rancher.text_single" . }}
+      {{- end }}
+      {{- end -}}
+
+      {{- define "rancher.text_single" -}}
+      {{- if .Labels.alertname }}
+      *[ALERT - {{ .Labels.alertname }}]*
+      {{- else }}
+      *[ALERT]*
+      {{- end }}
+      {{- if .Labels.severity }}
+      *Severity:* `{{ .Labels.severity }}`
+      {{- end }}
+      {{- if .Labels.cluster }}
+      *Cluster:* {{ .Labels.cluster }}
+      {{- end }}
+      {{- if .Annotations.summary }}
+      *Summary:* {{ .Annotations.summary }}
+      {{- end }}
+      {{- if .Annotations.message }}
+      *Message:* {{ .Annotations.message }}
+      {{- end }}
+      {{- if .Annotations.description }}
+      *Description:* {{ .Annotations.description }}
+      {{- end }}
+      {{- if .Annotations.runbook_url }}
+      *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:>
+      {{- end }}
+      {{- with .Labels }}
+      {{- with .Remove (stringSlice "alertname" "severity" "cluster") }}
+      {{- if gt (len .) 0 }}
+      *Additional Labels:*
+      {{- range .SortedPairs }}
+      • *{{ .Name }}:* `{{ .Value }}`
+      {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- with .Annotations }}
+      {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }}
+      {{- if gt (len .) 0 }}
+      *Additional Annotations:*
+      {{- range .SortedPairs }}
+      • *{{ .Name }}:* `{{ .Value }}`
+      {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- end -}}

  ingress:
    enabled: false
@@ -235,6 +744,25 @@
  ## Configuration for Alertmanager secret
  ##
  secret:
+
+    # Should the Alertmanager Config Secret be cleaned up on an uninstall?
+    # This is set to false by default to prevent the loss of alerting configuration on an uninstall
+    # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+    #
+    cleanupOnUninstall: false
+
+    # The image used to manage the Alertmanager Config Secret's lifecycle
+    # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+    #
+    image:
+      repository: rancher/rancher-agent
+      tag: v2.5.7
+      pullPolicy: IfNotPresent
+
+    securityContext:
+      runAsNonRoot: true
+      runAsUser: 1000
+
    annotations: {}

  ## Configuration for creating an Ingress that will map to each Alertmanager replica service
@@ -345,6 +873,10 @@
    interval: ""
    selfMonitor: true

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
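+    # For example (hypothetical proxy address):
+    # proxyUrl: http://proxy.example.com:3128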
+
    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
    scheme: ""

@@ -352,7 +884,7 @@
    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    tlsConfig: {}

-    bearerTokenFile:
+    bearerTokenFile: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
@@ -375,7 +907,7 @@
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
  ##
  alertmanagerSpec:
-    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
    ##
    podMetadata: {}
@@ -383,8 +915,8 @@
    ## Image of Alertmanager
    ##
    image:
-      repository: quay.io/prometheus/alertmanager
-      tag: v0.21.0
+      repository: rancher/mirrored-prometheus-alertmanager
+      tag: v0.22.2
      sha: ""

    ## If true then the user will be responsible to provide a secret with alertmanager configuration
@@ -495,9 +1027,13 @@
    ## Define resources requests and limits for single Pods.
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
+    resources:
+      limits:
+        memory: 500Mi
+        cpu: 1000m
+      requests:
+        memory: 100Mi
+        cpu: 100m

    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
@@ -601,10 +1137,54 @@
  enabled: true
  namespaceOverride: ""

+  ## Grafana's primary configuration
+  ## NOTE: values in map will be converted to ini format
+  ## ref: http://docs.grafana.org/installation/configuration/
+  ##
+  grafana.ini:
+    users:
+      auto_assign_org_role: Viewer
+    auth:
+      disable_login_form: false
+    auth.anonymous:
+      enabled: true
+      org_role: Viewer
+    auth.basic:
+      enabled: false
+    dashboards:
+      # Modify this value to change the default dashboard shown on the main Grafana page
+      default_home_dashboard_path: /tmp/dashboards/rancher-default-home.json
+    security:
+      # Required to embed dashboards in Rancher Cluster Overview Dashboard on Cluster Explorer
+      allow_embedding: true
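+    # Illustrative note: each top-level key above becomes an ini section when
+    # Grafana renders this map; e.g. the auth.anonymous block is equivalent to:
+    #   [auth.anonymous]
+    #   enabled = true
+    #   org_role = Viewer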
+
+  deploymentStrategy:
+    type: Recreate
+
+  ## ForceDeployDatasources: create the datasource configmap even if the grafana deployment has been disabled
+  ##
+  forceDeployDatasources: false
+
+  ## ForceDeployDashboard: create the dashboard configmap even if the grafana deployment has been disabled
+  ##
+  forceDeployDashboards: false
+
  ## Deploy default dashboards.
  ##
  defaultDashboardsEnabled: true

+  # Additional options for defaultDashboards
+  defaultDashboards:
+    # The default namespace to place defaultDashboards within
+    namespace: cattle-dashboards
+    # Whether to create the default namespace as a Helm managed namespace or use an existing namespace
+    # If false, the defaultDashboards.namespace will be created as a Helm managed namespace
+    useExistingNamespace: false
+    # Whether the Helm managed namespace created by this chart should be left behind on a Helm uninstall
+    # If you place other dashboards in this namespace, then they will be deleted on a helm uninstall
+    # Ignored if useExistingNamespace is true
+    cleanupOnUninstall: false
+
  adminPassword: prom-operator

  ingress:
@@ -644,6 +1224,7 @@
    dashboards:
      enabled: true
      label: grafana_dashboard
+      searchNamespace: cattle-dashboards

    ## Annotations for Grafana dashboard configmaps
    ##
@@ -653,6 +1234,10 @@
      enabled: true
      defaultDatasourceEnabled: true

+      ## URL of prometheus datasource
+      ##
+      # url: http://prometheus-stack-prometheus:9090/
+
      # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
      # defaultDatasourceScrapeInterval: 15s

@@ -692,7 +1277,60 @@
  ## Passed to grafana subchart and used by servicemonitor below
  ##
  service:
-    portName: service
+    portName: nginx-http
+    ## Port for Grafana Service to listen on
+    ##
+    port: 80
+    ## To be used with a proxy extraContainer port
+    ##
+    targetPort: 8080
+    ## Port to expose on each node
+    ## Only used if service.type is 'NodePort'
+    ##
+    nodePort: 30950
+    ## Service type
+    ##
+    type: ClusterIP
+
+  proxy:
+    image:
+      repository: rancher/mirrored-library-nginx
+      tag: 1.19.9-alpine
+
+  ## Enables specifying a container in extraContainers. This is meant to allow adding an authentication proxy to a Grafana pod
+  extraContainers: |
+    - name: grafana-proxy
+      args:
+        - nginx
+        - -g
+        - daemon off;
+        - -c
+        - /nginx/nginx.conf
+      image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
+      ports:
+        - containerPort: 8080
+          name: nginx-http
+          protocol: TCP
+      volumeMounts:
+        - mountPath: /nginx
+          name: grafana-nginx
+        - mountPath: /var/cache/nginx
+          name: nginx-home
+      securityContext:
+        runAsUser: 101
+        runAsGroup: 101
+
+  ## Volumes that can be used in containers
+  extraContainerVolumes:
+    - name: nginx-home
+      emptyDir: {}
+    - name: grafana-nginx
+      configMap:
+        name: grafana-nginx-proxy-config
+        items:
+          - key: nginx.conf
+            mode: 438
+            path: nginx.conf

  ## If true, create a serviceMonitor for grafana
  ##
@@ -722,6 +1360,14 @@
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace
+
+  resources:
+    limits:
+      memory: 200Mi
+      cpu: 200m
+    requests:
+      memory: 100Mi
+      cpu: 100m

## Component scraping the kube api server
##
@@ -730,23 +1376,14 @@
  tlsConfig:
    serverName: kubernetes
    insecureSkipVerify: false
-
-  ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service
-  ##
-  relabelings: []
-  # - sourceLabels:
-  #     - __meta_kubernetes_namespace
-  #     - __meta_kubernetes_service_name
-  #     - __meta_kubernetes_endpoint_port_name
-  #   action: keep
-  #   regex: default;kubernetes;https
-  # - targetLabel: __address__
-  #   replacement: kubernetes.default.svc:443
-
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    jobLabel: component
    selector:
      matchLabels:
@@ -759,6 +1396,15 @@
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]
+    relabelings: []
+    # - sourceLabels:
+    #     - __meta_kubernetes_namespace
+    #     - __meta_kubernetes_service_name
+    #     - __meta_kubernetes_endpoint_port_name
+    #   action: keep
+    #   regex: default;kubernetes;https
+    # - targetLabel: __address__
+    #   replacement: kubernetes.default.svc:443

## Component scraping the kubelet and kubelet-hosted cAdvisor
##
@@ -771,6 +1417,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## Enable scraping the kubelet over https. For requirements to enable this see
    ## https://github.com/prometheus-operator/prometheus-operator/issues/926
    ##
@@ -879,7 +1529,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:
-  enabled: true
+  enabled: false

  ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  ##
@@ -903,6 +1553,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## Enable scraping kube-controller-manager over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
@@ -945,6 +1599,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
@@ -980,6 +1638,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
@@ -1014,7 +1676,7 @@
## Component scraping etcd
##
kubeEtcd:
-  enabled: true
+  enabled: false

  ## If your etcd is not deployed as a pod, specify IPs it can be found on
  ##
@@ -1048,6 +1710,9 @@
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
    scheme: http
    insecureSkipVerify: false
    serverName: ""
@@ -1076,7 +1741,7 @@
## Component scraping kube scheduler
##
kubeScheduler:
-  enabled: true
+  enabled: false

  ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  ##
@@ -1099,6 +1764,9 @@
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
    ## Enable scraping kube-scheduler over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
@@ -1131,7 +1799,7 @@
## Component scraping kube proxy
##
kubeProxy:
-  enabled: true
+  enabled: false

  ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
  ##
@@ -1153,6 +1821,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## Enable scraping kube-proxy over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
@@ -1181,9 +1853,15 @@
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
    ## Override serviceMonitor selector
    ##
    selectorOverride: {}
+    ## Override namespace selector
+    ##
+    namespaceOverride: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
@@ -1210,6 +1888,13 @@
    create: true
  podSecurityPolicy:
    enabled: true
+  resources:
+    limits:
+      cpu: 100m
+      memory: 200Mi
+    requests:
+      cpu: 100m
+      memory: 130Mi

## Deploy node exporter as a daemonset to all nodes
##
@@ -1225,6 +1910,10 @@
    ##
    interval: ""

+    ## proxyUrl: URL of a proxy that should be used for scraping.
+    ##
+    proxyUrl: ""
+
    ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
    ##
    scrapeTimeout: ""
@@ -1259,6 +1948,16 @@
  extraArgs:
    - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
    - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
+  service:
+    port: 9796
+    targetPort: 9796
+  resources:
+    limits:
+      cpu: 200m
+      memory: 50Mi
+    requests:
+      cpu: 100m
+      memory: 30Mi

## Manages Prometheus and Alertmanager components
##
@@ -1271,8 +1970,8 @@
    enabled: true
    # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
    tlsMinVersion: VersionTLS13
-    # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
-    internalPort: 10250
+    # Users who are deploying this chart in GKE private clusters will need to add firewall rules to expose this port for admission webhooks
+    internalPort: 8443

  ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
  ## rules from making their way into prometheus and potentially preventing the container from starting
@@ -1289,8 +1988,8 @@
    patch:
      enabled: true
      image:
-        repository: jettech/kube-webhook-certgen
-        tag: v1.5.0
+        repository: rancher/mirrored-jettech-kube-webhook-certgen
+        tag: v1.5.2
        sha: ""
        pullPolicy: IfNotPresent
      resources: {}
@@ -1301,6 +2000,16 @@
      nodeSelector: {}
      affinity: {}
      tolerations: []
+
+      ## SecurityContext holds pod-level security attributes and common container settings.
+      ## This defaults to a non-root user with uid 2000 and gid 2000.
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+      ##
+      securityContext:
+        runAsGroup: 2000
+        runAsNonRoot: true
+        runAsUser: 2000
+
    # Use certmanager to generate webhook certs
    certManager:
      enabled: false
@@ -1428,13 +2137,13 @@

  ## Resource limits & requests
  ##
-  resources: {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 200Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 100Mi
+  resources:
+    limits:
+      cpu: 200m
+      memory: 500Mi
+    requests:
+      cpu: 100m
+      memory: 100Mi

  # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
  # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1487,8 +2196,8 @@
  ## Prometheus-operator image
  ##
  image:
-    repository: quay.io/prometheus-operator/prometheus-operator
-    tag: v0.46.0
+    repository: rancher/mirrored-prometheus-operator-prometheus-operator
+    tag: v0.48.0
    sha: ""
    pullPolicy: IfNotPresent

@@ -1503,8 +2212,8 @@
  ## Prometheus-config-reloader image to use for config and rule reloading
  ##
  prometheusConfigReloaderImage:
-    repository: quay.io/prometheus-operator/prometheus-config-reloader
-    tag: v0.46.0
+    repository: rancher/mirrored-prometheus-operator-prometheus-config-reloader
+    tag: v0.48.0
    sha: ""

  ## Set the prometheus config reloader side-car CPU limit
@@ -1535,6 +2244,7 @@
  serviceAccount:
    create: true
    name: ""
+    annotations: {}

  # Service for thanos service discovery on sidecar
  # Enabling this allows Thanos Query to use the sidecar for service discovery
@@ -1558,6 +2268,24 @@
    ##
    nodePort: 30901

+  # Service for external access to sidecar
+  # Enabling this creates a service to expose thanos-sidecar outside the cluster.
+  thanosServiceExternal:
+    enabled: false
+    annotations: {}
+    labels: {}
+    portName: grpc
+    port: 10901
+    targetPort: "grpc"
+
+    ## Service type
+    ##
+    type: LoadBalancer
+
+    ## Port to expose on each node
+    ##
+    nodePort: 30901
+
  ## Configuration for Prometheus service
  ##
  service:
@@ -1570,7 +2298,7 @@
    port: 9090

    ## To be used with a proxy extraContainer port
-    targetPort: 9090
+    targetPort: 8081

    ## List of IP addresses at which the Prometheus server service is available
    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
@@ -1819,11 +2547,16 @@
    ##
    enableAdminAPI: false

+    # EnableFeatures API enables access to Prometheus disabled features.
+    # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
+    enableFeatures: []
+    # - exemplar-storage
+
    ## Image of Prometheus.
    ##
    image:
-      repository: quay.io/prometheus/prometheus
-      tag: v2.24.0
+      repository: rancher/mirrored-prometheus-prometheus
+      tag: v2.27.1
      sha: ""

    ## Tolerations for use with node taints
@@ -1885,6 +2618,11 @@
    ##
    externalUrl: ""

+    ## Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs
+    ## If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into
+    ##
+    ignoreNamespaceSelectors: false
+
    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
@@ -1917,7 +2655,7 @@
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the PrometheusRule resources created
    ##
-    ruleSelectorNilUsesHelmValues: true
+    ruleSelectorNilUsesHelmValues: false

    ## PrometheusRules to be selected for target discovery.
    ## If {}, select all PrometheusRules
@@ -1942,7 +2680,7 @@
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the servicemonitors created
    ##
-    serviceMonitorSelectorNilUsesHelmValues: true
+    serviceMonitorSelectorNilUsesHelmValues: false

    ## ServiceMonitors to be selected for target discovery.
    ## If {}, select all ServiceMonitors
@@ -1965,7 +2703,7 @@
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the podmonitors created
    ##
-    podMonitorSelectorNilUsesHelmValues: true
+    podMonitorSelectorNilUsesHelmValues: false

    ## PodMonitors to be selected for target discovery.
    ## If {}, select all PodMonitors
@@ -2044,7 +2782,7 @@
    ##
    routePrefix: /

-    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the prometheus pods.
    ##
    podMetadata: {}
@@ -2081,20 +2819,28 @@
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
    remoteRead: []
    # - url: http://remote1/read
+    ## additionalRemoteRead is appended to remoteRead
+    additionalRemoteRead: []

    ## The remote_write spec configuration for Prometheus.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
    remoteWrite: []
    # - url: http://remote1/push
+    ## additionalRemoteWrite is appended to remoteWrite
+    additionalRemoteWrite: []
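+    # For example (illustrative, following the remoteWrite entry shape above):
+    # additionalRemoteWrite:
+    #   - url: http://remote2/push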

    ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
    remoteWriteDashboards: false

    ## Resource limits & requests
    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
+    resources:
+      limits:
+        memory: 1500Mi
+        cpu: 1000m
+      requests:
+        memory: 750Mi
+        cpu: 750m

    ## Prometheus StorageSpec for persistent data
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
@@ -2117,7 +2863,13 @@
    #   medium: Memory

    # Additional volumes on the output StatefulSet definition.
-    volumes: []
+    volumes:
+      - name: nginx-home
+        emptyDir: {}
+      - name: prometheus-nginx
+        configMap:
+          name: prometheus-nginx-proxy-config
+          defaultMode: 438

    # Additional VolumeMounts on the output StatefulSet definition.
    volumeMounts: []
@@ -2224,9 +2976,34 @@
    ##
    thanos: {}

+    proxy:
+      image:
+        repository: rancher/mirrored-library-nginx
+        tag: 1.19.9-alpine
+
    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
    ## if using proxy extraContainer update targetPort with proxy container port
-    containers: []
+    containers: |
+      - name: prometheus-proxy
+        args:
+          - nginx
+          - -g
+          - daemon off;
+          - -c
+          - /nginx/nginx.conf
+        image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}"
+        ports:
+          - containerPort: 8081
+            name: nginx-http
+            protocol: TCP
+        volumeMounts:
+          - mountPath: /nginx
+            name: prometheus-nginx
+          - mountPath: /var/cache/nginx
+            name: nginx-home
+        securityContext:
+          runAsUser: 101
+          runAsGroup: 101

    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting prometheus
@@ -2234,7 +3011,7 @@

    ## PortName to use for Prometheus.
    ##
-    portName: "web"
+    portName: "nginx-http"

    ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
    ## on the file system of the Prometheus container e.g. bearer token files.