rancher-charts/packages/rancher-monitoring/generated-changes/patch/values.yaml.patch

--- charts-original/values.yaml
+++ charts/values.yaml
@@ -2,13 +2,423 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
+# Rancher Monitoring Configuration
+
+## Configuration for prometheus-adapter
+## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter
+##
+prometheus-adapter:
+ enabled: true
+ prometheus:
+ # Change this if you change the namespaceOverride or nameOverride of prometheus-operator
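+ # The default below matches the nameOverride ("rancher-monitoring") and namespaceOverride
+ # ("cattle-monitoring-system") set further down in this file; if either override changes, update
+ # the URL to match, e.g. a namespaceOverride of "monitoring" would give
+ # http://rancher-monitoring-prometheus.monitoring.svc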
+ url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc
+ port: 9090
+ psp:
+ create: true
+
+## RKE PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
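+## Each PushProx section below deploys a PushProx proxy and per-node clients so that Prometheus
+## can scrape control-plane components that are otherwise only reachable from the node itself,
+## without requiring a host port to be open for scraping. metricsPort is the port on the node
+## where the component exposes its metrics; useLocalhost makes the client scrape it via 127.0.0.1.
+##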
+rkeControllerManager:
+ enabled: false
+ metricsPort: 10252
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeScheduler:
+ enabled: false
+ metricsPort: 10251
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeProxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeEtcd:
+ enabled: false
+ metricsPort: 2379
+ component: kube-etcd
+ clients:
+ port: 10014
+ https:
+ enabled: true
+ certDir: /etc/kubernetes/ssl
+ certFile: kube-etcd-*.pem
+ keyFile: kube-etcd-*-key.pem
+ caCertFile: kube-ca.pem
+ nodeSelector:
+ node-role.kubernetes.io/etcd: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeIngressNginx:
+ enabled: false
+ metricsPort: 10254
+ component: ingress-nginx
+ clients:
+ port: 10015
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ nodeSelector:
+ node-role.kubernetes.io/worker: "true"
+
+## k3s PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+k3sServer:
+ enabled: false
+ metricsPort: 10250
+ component: k3s-server
+ clients:
+ port: 10013
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ rbac:
+ additionalRules:
+ - nonResourceURLs: ["/metrics/cadvisor"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["nodes/metrics"]
+ verbs: ["get"]
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ serviceMonitor:
+ endpoints:
+ - port: metrics
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ - port: metrics
+ path: /metrics/cadvisor
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ - port: metrics
+ path: /metrics/probes
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
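+ # The three endpoints above scrape the kubelet-style /metrics, /metrics/cadvisor, and
+ # /metrics/probes paths; the relabeling copies __metrics_path__ into a metrics_path label so
+ # series from the different paths remain distinguishable.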
+
+## KubeADM PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+kubeAdmControllerManager:
+ enabled: false
+ metricsPort: 10257
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmScheduler:
+ enabled: false
+ metricsPort: 10259
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmProxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+kubeAdmEtcd:
+ enabled: false
+ metricsPort: 2381
+ component: kube-etcd
+ clients:
+ port: 10014
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+## rke2 PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+rke2ControllerManager:
+ enabled: false
+ metricsPort: 10252
+ component: kube-controller-manager
+ clients:
+ port: 10011
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Scheduler:
+ enabled: false
+ metricsPort: 10251
+ component: kube-scheduler
+ clients:
+ port: 10012
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/master: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Proxy:
+ enabled: false
+ metricsPort: 10249
+ component: kube-proxy
+ clients:
+ port: 10013
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2Etcd:
+ enabled: false
+ metricsPort: 2381
+ component: kube-etcd
+ clients:
+ port: 10014
+ useLocalhost: true
+ nodeSelector:
+ node-role.kubernetes.io/etcd: "true"
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2IngressNginx:
+ enabled: false
+ metricsPort: 10254
+ component: ingress-nginx
+ clients:
+ port: 10015
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: "app.kubernetes.io/component"
+ operator: "In"
+ values:
+ - "controller"
+ topologyKey: "kubernetes.io/hostname"
+ namespaces:
+ - "kube-system"
+ # In RKE2 clusters, the ingress-nginx controller is deployed as
+ # a Deployment with 1 pod when the RKE2 version is <= 1.20, and as
+ # a DaemonSet when the RKE2 version is >= 1.21
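+ # When the controller runs as a Deployment, set deployment.enabled=true below so the PushProx
+ # clients can also run as a Deployment co-scheduled with the controller pod (via the podAffinity
+ # above) instead of as a DaemonSet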
+ deployment:
+ enabled: false
+ replicas: 1
+
+
+
+## Additional PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
+
+# hardenedKubelet can only be deployed if kubelet.enabled=true
+# If enabled, it replaces the ServiceMonitor deployed by the default kubelet option with a
+# PushProx-based exporter that does not require a host port to be open to scrape metrics.
+hardenedKubelet:
+ enabled: false
+ metricsPort: 10250
+ component: kubelet
+ clients:
+ port: 10015
+ useLocalhost: true
+ https:
+ enabled: true
+ useServiceAccountCredentials: true
+ insecureSkipVerify: true
+ rbac:
+ additionalRules:
+ - nonResourceURLs: ["/metrics/cadvisor"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["nodes/metrics"]
+ verbs: ["get"]
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ serviceMonitor:
+ endpoints:
+ - port: metrics
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ - port: metrics
+ path: /metrics/cadvisor
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ - port: metrics
+ path: /metrics/probes
+ honorLabels: true
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+
+# hardenedNodeExporter can only be deployed if nodeExporter.enabled=true
+# If enabled, it replaces the ServiceMonitor deployed by the default nodeExporter with a
+# PushProx-based exporter that does not require a host port to be open to scrape metrics.
+hardenedNodeExporter:
+ enabled: false
+ metricsPort: 9796
+ component: node-exporter
+ clients:
+ port: 10016
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+## Component scraping nginx-ingress-controller
+##
+ingressNginx:
+ enabled: false
+
+ ## The namespace to search for your nginx-ingress-controller
+ ##
+ namespace: ingress-nginx
+
+ service:
+ port: 9913
+ targetPort: 10254
+ # selector:
+ # app: ingress-nginx
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## metric relabel configs to apply to samples before ingestion.
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## relabel configs to apply to samples before ingestion.
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+# Prometheus Operator Configuration
+
## Provide a name in place of kube-prometheus-stack for `app:` labels
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-nameOverride: ""
+nameOverride: "rancher-monitoring"
## Override the deployment namespace
+## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
-namespaceOverride: ""
+namespaceOverride: "cattle-monitoring-system"
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
@@ -89,8 +499,32 @@
##
global:
+ cattle:
+ systemDefaultRegistry: ""
+ ## Windows Monitoring
+ ## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-windows-exporter
+ ##
+ ## Deploys a DaemonSet of Prometheus exporters based on https://github.com/prometheus-community/windows_exporter.
+ ## Every Windows host must have a wins version of 0.1.0+ to use this chart (default as of Rancher 2.5.8).
+ ## To upgrade wins versions on Windows hosts, see https://github.com/rancher/wins/tree/master/charts/rancher-wins-upgrader.
+ ##
+ windows:
+ enabled: false
+ kubectl:
+ repository: rancher/kubectl
+ tag: v1.20.2
+ pullPolicy: IfNotPresent
rbac:
+ ## Create RBAC resources for ServiceAccounts and users
+ ##
create: true
+
+ userRoles:
+ ## Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets
+ create: true
+ ## Aggregate default user ClusterRoles into default k8s ClusterRoles
+ aggregateToDefaultRoles: true
+
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
@@ -143,6 +577,22 @@
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
## https://prometheus.io/webtools/alerting/routing-tree-editor/
##
+ ## Example Slack Config
+ ## config:
+ ## route:
+ ## group_by: ['job']
+ ## group_wait: 30s
+ ## group_interval: 5m
+ ## repeat_interval: 3h
+ ## receiver: 'slack-notifications'
+ ## receivers:
+ ## - name: 'slack-notifications'
+ ## slack_configs:
+ ## - send_resolved: true
+ ## text: '{{ template "slack.rancher.text" . }}'
+ ## api_url: <slack-webhook-url-here>
+ ## templates:
+ ## - /etc/alertmanager/config/*.tmpl
config:
global:
resolve_timeout: 5m
@@ -179,25 +629,76 @@
## ref: https://prometheus.io/docs/alerting/notifications/
## https://prometheus.io/docs/alerting/notification_examples/
##
- templateFiles: {}
- #
- ## An example template:
- # template_1.tmpl: |-
- # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
- #
- # {{ define "slack.myorg.text" }}
- # {{- $root := . -}}
- # {{ range .Alerts }}
- # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
- # *Cluster:* {{ template "cluster" $root }}
- # *Description:* {{ .Annotations.description }}
- # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
- # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
- # *Details:*
- # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- # {{ end }}
- # {{ end }}
- # {{ end }}
+ templateFiles:
+ rancher_defaults.tmpl: |-
+ {{- define "slack.rancher.text" -}}
+ {{ template "rancher.text_multiple" . }}
+ {{- end -}}
+
+ {{- define "rancher.text_multiple" -}}
+ *[GROUP - Details]*
+ One or more alarms in this group have triggered a notification.
+
+ {{- if gt (len .GroupLabels.Values) 0 }}
+ *Group Labels:*
+ {{- range .GroupLabels.SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- if .ExternalURL }}
+ *Link to AlertManager:* {{ .ExternalURL }}
+ {{- end }}
+
+ {{- range .Alerts }}
+ {{ template "rancher.text_single" . }}
+ {{- end }}
+ {{- end -}}
+
+ {{- define "rancher.text_single" -}}
+ {{- if .Labels.alertname }}
+ *[ALERT - {{ .Labels.alertname }}]*
+ {{- else }}
+ *[ALERT]*
+ {{- end }}
+ {{- if .Labels.severity }}
+ *Severity:* `{{ .Labels.severity }}`
+ {{- end }}
+ {{- if .Labels.cluster }}
+ *Cluster:* {{ .Labels.cluster }}
+ {{- end }}
+ {{- if .Annotations.summary }}
+ *Summary:* {{ .Annotations.summary }}
+ {{- end }}
+ {{- if .Annotations.message }}
+ *Message:* {{ .Annotations.message }}
+ {{- end }}
+ {{- if .Annotations.description }}
+ *Description:* {{ .Annotations.description }}
+ {{- end }}
+ {{- if .Annotations.runbook_url }}
+ *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:>
+ {{- end }}
+ {{- with .Labels }}
+ {{- with .Remove (stringSlice "alertname" "severity" "cluster") }}
+ {{- if gt (len .) 0 }}
+ *Additional Labels:*
+ {{- range .SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Annotations }}
+ {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }}
+ {{- if gt (len .) 0 }}
+ *Additional Annotations:*
+ {{- range .SortedPairs }}
+ • *{{ .Name }}:* `{{ .Value }}`
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end -}}
ingress:
enabled: false
@@ -235,6 +736,25 @@
## Configuration for Alertmanager secret
##
secret:
+
+ # Should the Alertmanager Config Secret be cleaned up on an uninstall?
+ # This is set to false by default to prevent the loss of alerting configuration on an uninstall
+ # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+ #
+ cleanupOnUninstall: false
+
+ # The image used to manage the Alertmanager Config Secret's lifecycle
+ # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
+ #
+ image:
+ repository: rancher/rancher-agent
+ tag: v2.5.7
+ pullPolicy: IfNotPresent
+
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+
annotations: {}
## Configuration for creating an Ingress that will map to each Alertmanager replica service
@@ -352,7 +872,7 @@
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
tlsConfig: {}
- bearerTokenFile:
+ bearerTokenFile: ""
## metric relabel configs to apply to samples before ingestion.
##
@@ -383,7 +903,7 @@
## Image of Alertmanager
##
image:
- repository: quay.io/prometheus/alertmanager
+ repository: rancher/mirrored-prom-alertmanager
tag: v0.21.0
sha: ""
@@ -495,9 +1015,13 @@
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
- resources: {}
- # requests:
- # memory: 400Mi
+ resources:
+ limits:
+ memory: 500Mi
+ cpu: 1000m
+ requests:
+ memory: 100Mi
+ cpu: 100m
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
@@ -601,10 +1125,46 @@
enabled: true
namespaceOverride: ""
+ ## Grafana's primary configuration
+ ## NOTE: values in map will be converted to ini format
+ ## ref: http://docs.grafana.org/installation/configuration/
+ ##
+ grafana.ini:
+ users:
+ auto_assign_org_role: Viewer
+ auth:
+ disable_login_form: false
+ auth.anonymous:
+ enabled: true
+ org_role: Viewer
+ auth.basic:
+ enabled: false
+ dashboards:
+ # Modify this value to change the default dashboard shown on the main Grafana page
+ default_home_dashboard_path: /tmp/dashboards/rancher-default-home.json
+ security:
+ # Required to embed dashboards in Rancher Cluster Overview Dashboard on Cluster Explorer
+ allow_embedding: true
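+ # Each top-level key above becomes an ini section, e.g. the auth.anonymous block is rendered
+ # into grafana.ini as:
+ # [auth.anonymous]
+ # enabled = true
+ # org_role = Viewer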
+
+ deploymentStrategy:
+ type: Recreate
+
## Deploy default dashboards.
##
defaultDashboardsEnabled: true
+ # Additional options for defaultDashboards
+ defaultDashboards:
+ # The default namespace to place defaultDashboards within
+ namespace: cattle-dashboards
+ # Whether to create the default namespace as a Helm managed namespace or use an existing namespace
+ # If false, the defaultDashboards.namespace will be created as a Helm managed namespace
+ useExistingNamespace: false
+ # Whether the Helm managed namespace created by this chart should be cleaned up (deleted) on a Helm uninstall
+ # If the namespace is cleaned up and you have placed other dashboards in it, they will be deleted as well
+ # Ignored if useExistingNamespace is true
+ cleanupOnUninstall: false
+
adminPassword: prom-operator
ingress:
@@ -644,6 +1204,7 @@
dashboards:
enabled: true
label: grafana_dashboard
+ searchNamespace: cattle-dashboards
## Annotations for Grafana dashboard configmaps
##
@@ -692,7 +1253,60 @@
## Passed to grafana subchart and used by servicemonitor below
##
service:
- portName: service
+ portName: nginx-http
+ ## Port for Grafana Service to listen on
+ ##
+ port: 80
+ ## To be used with a proxy extraContainer port
+ ##
+ targetPort: 8080
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30950
+ ## Service type
+ ##
+ type: ClusterIP
+
+ proxy:
+ image:
+ repository: rancher/mirrored-library-nginx
+ tag: 1.19.9-alpine
+
+ ## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a Grafana pod
+ extraContainers: |
+ - name: grafana-proxy
+ args:
+ - nginx
+ - -g
+ - daemon off;
+ - -c
+ - /nginx/nginx.conf
+ image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
+ ports:
+ - containerPort: 8080
+ name: nginx-http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /nginx
+ name: grafana-nginx
+ - mountPath: /var/cache/nginx
+ name: nginx-home
+ securityContext:
+ runAsUser: 101
+ runAsGroup: 101
+
+ ## Volumes that can be used in containers
+ extraContainerVolumes:
+ - name: nginx-home
+ emptyDir: {}
+ - name: grafana-nginx
+ configMap:
+ name: grafana-nginx-proxy-config
+ items:
+ - key: nginx.conf
+ mode: 438
+ path: nginx.conf
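+ ## The grafana-proxy container above receives all Service traffic (targetPort 8080, port name
+ ## nginx-http) and forwards it to Grafana; its nginx.conf is mounted from the
+ ## grafana-nginx-proxy-config ConfigMap via the grafana-nginx volume.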
## If true, create a serviceMonitor for grafana
##
@@ -722,6 +1336,14 @@
# targetLabel: nodename
# replacement: $1
# action: replace
+
+ resources:
+ limits:
+ memory: 200Mi
+ cpu: 200m
+ requests:
+ memory: 100Mi
+ cpu: 100m
## Component scraping the kube api server
##
@@ -879,7 +1501,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:
- enabled: true
+ enabled: false
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -1014,7 +1636,7 @@
## Component scraping etcd
##
kubeEtcd:
- enabled: true
+ enabled: false
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -1076,7 +1698,7 @@
## Component scraping kube scheduler
##
kubeScheduler:
- enabled: true
+ enabled: false
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -1131,7 +1753,7 @@
## Component scraping kube proxy
##
kubeProxy:
- enabled: true
+ enabled: false
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1210,6 +1832,13 @@
create: true
podSecurityPolicy:
enabled: true
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 130Mi
## Deploy node exporter as a daemonset to all nodes
##
@@ -1259,6 +1888,16 @@
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
+ service:
+ port: 9796
+ targetPort: 9796
+ resources:
+ limits:
+ cpu: 200m
+ memory: 50Mi
+ requests:
+ cpu: 100m
+ memory: 30Mi
## Manages Prometheus and Alertmanager components
##
@@ -1271,8 +1910,8 @@
enabled: true
# Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
tlsMinVersion: VersionTLS13
- # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
- internalPort: 10250
+ # Users who are deploying this chart in GKE private clusters will need to add firewall rules to expose this port for admission webhooks
+ internalPort: 8443
## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
## rules from making their way into prometheus and potentially preventing the container from starting
@@ -1289,7 +1928,7 @@
patch:
enabled: true
image:
- repository: jettech/kube-webhook-certgen
+ repository: rancher/mirrored-jettech-kube-webhook-certgen
tag: v1.5.0
sha: ""
pullPolicy: IfNotPresent
@@ -1428,13 +2067,13 @@
## Resource limits & requests
##
- resources: {}
- # limits:
- # cpu: 200m
- # memory: 200Mi
- # requests:
- # cpu: 100m
- # memory: 100Mi
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1487,7 +2126,7 @@
## Prometheus-operator image
##
image:
- repository: quay.io/prometheus-operator/prometheus-operator
+ repository: rancher/mirrored-prometheus-operator-prometheus-operator
tag: v0.46.0
sha: ""
pullPolicy: IfNotPresent
@@ -1503,7 +2142,7 @@
## Prometheus-config-reloader image to use for config and rule reloading
##
prometheusConfigReloaderImage:
- repository: quay.io/prometheus-operator/prometheus-config-reloader
+ repository: rancher/mirrored-prometheus-operator-prometheus-config-reloader
tag: v0.46.0
sha: ""
@@ -1558,6 +2197,14 @@
##
nodePort: 30901
+ ## Service type
+ ##
+ type: ClusterIP
+
## Configuration for Prometheus service
##
service:
@@ -1570,7 +2217,7 @@
port: 9090
## To be used with a proxy extraContainer port
- targetPort: 9090
+ targetPort: 8081
## List of IP addresses at which the Prometheus server service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
@@ -1822,7 +2469,7 @@
## Image of Prometheus.
##
image:
- repository: quay.io/prometheus/prometheus
+ repository: rancher/mirrored-prometheus-prometheus
tag: v2.24.0
sha: ""
@@ -1885,6 +2532,11 @@
##
externalUrl: ""
+ ## Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs
+ ## If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into
+ ##
+ ignoreNamespaceSelectors: false
+
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
@@ -1917,7 +2569,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##
- ruleSelectorNilUsesHelmValues: true
+ ruleSelectorNilUsesHelmValues: false
## PrometheusRules to be selected for target discovery.
## If {}, select all PrometheusRules
@@ -1942,7 +2594,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##
- serviceMonitorSelectorNilUsesHelmValues: true
+ serviceMonitorSelectorNilUsesHelmValues: false
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1965,7 +2617,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##
- podMonitorSelectorNilUsesHelmValues: true
+ podMonitorSelectorNilUsesHelmValues: false
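+ ## With ruleSelectorNilUsesHelmValues, serviceMonitorSelectorNilUsesHelmValues, and
+ ## podMonitorSelectorNilUsesHelmValues all set to false above, Prometheus discovers every
+ ## PrometheusRule, ServiceMonitor, and PodMonitor in the cluster rather than only the ones
+ ## labeled as part of this Helm release.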
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
@@ -2092,9 +2744,13 @@
## Resource limits & requests
##
- resources: {}
- # requests:
- # memory: 400Mi
+ resources:
+ limits:
+ memory: 1500Mi
+ cpu: 1000m
+ requests:
+ memory: 750Mi
+ cpu: 750m
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
@@ -2117,7 +2773,13 @@
# medium: Memory
# Additional volumes on the output StatefulSet definition.
- volumes: []
+ volumes:
+ - name: nginx-home
+ emptyDir: {}
+ - name: prometheus-nginx
+ configMap:
+ name: prometheus-nginx-proxy-config
+ defaultMode: 438
# Additional VolumeMounts on the output StatefulSet definition.
volumeMounts: []
@@ -2224,9 +2886,34 @@
##
thanos: {}
+ proxy:
+ image:
+ repository: rancher/mirrored-library-nginx
+ tag: 1.19.9-alpine
+
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
## if using proxy extraContainer update targetPort with proxy container port
- containers: []
+ containers: |
+ - name: prometheus-proxy
+ args:
+ - nginx
+ - -g
+ - daemon off;
+ - -c
+ - /nginx/nginx.conf
+ image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}"
+ ports:
+ - containerPort: 8081
+ name: nginx-http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /nginx
+ name: prometheus-nginx
+ - mountPath: /var/cache/nginx
+ name: nginx-home
+ securityContext:
+ runAsUser: 101
+ runAsGroup: 101
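+ ## Like the Grafana proxy, the prometheus-proxy container above receives Service traffic on
+ ## port 8081 (the prometheus service targetPort set above) and forwards it to Prometheus on
+ ## 9090; its nginx.conf comes from the prometheus-nginx-proxy-config ConfigMap listed under
+ ## volumes above.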
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
## (permissions, dir tree) on mounted volumes before starting prometheus
@@ -2234,7 +2921,7 @@
## PortName to use for Prometheus.
##
- portName: "web"
+ portName: "nginx-http"
## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
## on the file system of the Prometheus container e.g. bearer token files.