# Default values for kube-prometheus-stack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Rancher Monitoring Configuration

## Configuration for prometheus-adapter
## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter
##
prometheus-adapter:
  enabled: true
  prometheus:
    # Change this if you change the namespaceOverride or nameOverride of prometheus-operator
    url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc
    port: 9090
  image:
    repository: rancher/directxman12-k8s-prometheus-adapter-amd64
    tag: v0.7.0
    pullPolicy: IfNotPresent
    pullSecrets: {}
  psp:
    create: true

## RKE PushProx Monitoring
## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
##
rkeControllerManager:
  enabled: false
  metricsPort: 10252
  component: kube-controller-manager
  clients:
    port: 10011
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/controlplane: "true"
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rkeScheduler:
  enabled: false
  metricsPort: 10251
  component: kube-scheduler
  clients:
    port: 10012
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/controlplane: "true"
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rkeProxy:
  enabled: false
  metricsPort: 10249
  component: kube-proxy
  clients:
    port: 10013
    useLocalhost: true
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rkeEtcd:
  enabled: false
  metricsPort: 2379
  component: kube-etcd
  clients:
    port: 10014
    https:
      enabled: true
      certDir: /etc/kubernetes/ssl
      certFile: kube-etcd-*.pem
      keyFile: kube-etcd-*-key.pem
      caCertFile: kube-ca.pem
    nodeSelector:
      node-role.kubernetes.io/etcd: "true"
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

## k3s PushProx Monitoring
## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
##
k3sServer:
  enabled: false
  metricsPort: 10249
  component: k3s-server
  clients:
    port: 10013
    useLocalhost: true
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

## KubeADM PushProx Monitoring
## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
##
kubeAdmControllerManager:
  enabled: false
  metricsPort: 10257
  component: kube-controller-manager
  clients:
    port: 10011
    useLocalhost: true
    https:
      enabled: true
      useServiceAccountCredentials: true
      insecureSkipVerify: true
    nodeSelector:
      node-role.kubernetes.io/master: ""
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

kubeAdmScheduler:
  enabled: false
  metricsPort: 10259
  component: kube-scheduler
  clients:
    port: 10012
    useLocalhost: true
    https:
      enabled: true
      useServiceAccountCredentials: true
      insecureSkipVerify: true
    nodeSelector:
      node-role.kubernetes.io/master: ""
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

kubeAdmProxy:
  enabled: false
  metricsPort: 10249
  component: kube-proxy
  clients:
    port: 10013
    useLocalhost: true
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

kubeAdmEtcd:
  enabled: false
  metricsPort: 2381
  component: kube-etcd
  clients:
    port: 10014
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/master: ""
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

## rke2 PushProx Monitoring
## ref: https://github.com/rancher/charts/tree/master/packages/rancher-pushprox
##
rke2ControllerManager:
  enabled: false
  metricsPort: 10252
  component: kube-controller-manager
  clients:
    port: 10011
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/master: "true"
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rke2Scheduler:
  enabled: false
  metricsPort: 10251
  component: kube-scheduler
  clients:
    port: 10012
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/master: "true"
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rke2Proxy:
  enabled: false
  metricsPort: 10249
  component: kube-proxy
  clients:
    port: 10013
    useLocalhost: true
    tolerations:
    - effect: "NoExecute"
      operator: "Exists"
    - effect: "NoSchedule"
      operator: "Exists"

rke2Etcd:
  enabled: false
  metricsPort: 2381
  component: kube-etcd
  clients:
    port: 10014
    useLocalhost: true
    nodeSelector:
      node-role.kubernetes.io/etcd: "true"
    tolerations:
    - effect: "NoSchedule"
      key: node-role.kubernetes.io/master
      operator: "Equal"

# Prometheus Operator Configuration

## Provide a name in place of kube-prometheus-stack for `app:` labels
## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
nameOverride: "rancher-monitoring"

## Override the deployment namespace
## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
namespaceOverride: "cattle-monitoring-system"

## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
kubeTargetVersionOverride: ""

## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""

## Labels to apply to all resources
##
commonLabels: {}
# scmhash: abc123
# myLabel: aakkmd

## Create default rules for monitoring the cluster
##
defaultRules:
  create: true
  rules:
    alertmanager: true
    etcd: true
    general: true
    k8s: true
    kubeApiserver: true
    kubeApiserverAvailability: true
    kubeApiserverError: true
    kubeApiserverSlos: true
    kubelet: true
    kubePrometheusGeneral: true
    kubePrometheusNodeAlerting: true
    kubePrometheusNodeRecording: true
    kubernetesAbsent: true
    kubernetesApps: true
    kubernetesResources: true
    kubernetesStorage: true
    kubernetesSystem: true
    kubeScheduler: true
    kubeStateMetrics: true
    network: true
    node: true
    prometheus: true
    prometheusOperator: true
    time: true

  ## Runbook url prefix for default rules
  runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#
  ## Reduce app namespace alert scope
  appNamespacesTarget: ".*"

  ## Labels for default rules
  labels: {}
  ## Annotations for default rules
  annotations: {}

## Provide custom recording or alerting rules to be deployed into the cluster.
##
additionalPrometheusRules: []
# - name: my-rule-file
#   groups:
#   - name: my_group
#     rules:
#     - record: my_record
#       expr: 100 * my_record

##
global:
  cattle:
    systemDefaultRegistry: ""
  kubectl:
    repository: rancher/kubectl
    tag: v1.18.6
    pullPolicy: IfNotPresent
  rbac:
    ## Create RBAC resources for ServiceAccounts and users
    ##
    create: true

    userRoles:
      ## Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets
      create: true
      ## Aggregate default user ClusterRoles into default k8s ClusterRoles
      aggregateToDefaultRoles: true

    pspEnabled: true
    pspAnnotations: {}
      ## Specify pod annotations
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
      ##
      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'

  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"

## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:

  ## Deploy alertmanager
  ##
  enabled: true

  ## API that Prometheus uses to communicate with Alertmanager. Possible values are v1, v2
  ##
  apiVersion: v2

  ## Service account for Alertmanager to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""
    annotations: {}

  ## Configure pod disruption budgets for Alertmanager
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
  ## https://github.com/kubernetes/kubernetes/issues/45398
  ##
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable: ""

  ## Alertmanager configuration directives
  ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
  ##      https://prometheus.io/webtools/alerting/routing-tree-editor/
  ##
  ## Example Slack Config
  ## config:
  ##   route:
  ##     group_by: ['job']
  ##     group_wait: 30s
  ##     group_interval: 5m
  ##     repeat_interval: 3h
  ##     receiver: 'slack-notifications'
  ##   receivers:
  ##   - name: 'slack-notifications'
  ##     slack_configs:
  ##     - send_resolved: true
  ##       text: '{{ template "slack.rancher.text" . }}'
  ##       api_url: <slack-webhook-url-here>
  ##   templates:
  ##   - /etc/alertmanager/config/*.tmpl
  config:
    global:
      resolve_timeout: 5m
    route:
      group_by: ['job']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 12h
      receiver: 'null'
      routes:
      - match:
          alertname: Watchdog
        receiver: 'null'
    receivers:
    - name: 'null'
    templates:
    - /etc/alertmanager/config/*.tmpl

  ## Pass the Alertmanager configuration directives through Helm's templating
  ## engine. If the Alertmanager configuration contains Alertmanager templates,
  ## they'll need to be properly escaped so that they are not interpreted by
  ## Helm
  ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
  ##      https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E
  ##      https://prometheus.io/docs/alerting/notifications/
  ##      https://prometheus.io/docs/alerting/notification_examples/
  tplConfig: false
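  ## A minimal escaping sketch for tplConfig (illustrative only, not part of the
  ## default config): with tplConfig enabled, a literal Alertmanager template
  ## reference inside `config` must be hidden from Helm's renderer, e.g.:
  # config:
  #   receivers:
  #   - name: 'slack-notifications'
  #     slack_configs:
  #     - text: '{{ "{{ template \"slack.rancher.text\" . }}" }}'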

  ## Alertmanager template files to format alerts
  ## ref: https://prometheus.io/docs/alerting/notifications/
  ##      https://prometheus.io/docs/alerting/notification_examples/
  ##
  templateFiles:
    rancher_defaults.tmpl: |-
      {{- define "slack.rancher.text" -}}
      {{ template "rancher.text_multiple" . }}
      {{- end -}}

      {{- define "rancher.text_multiple" -}}
      *[GROUP - Details]*
      One or more alarms in this group have triggered a notification.

      {{- if gt (len .GroupLabels.Values) 0 }}
      *Group Labels:*
      {{- range .GroupLabels.SortedPairs }}
      • *{{ .Name }}:* `{{ .Value }}`
      {{- end }}
      {{- end }}
      {{- if .ExternalURL }}
      *Link to AlertManager:* {{ .ExternalURL }}
      {{- end }}

      {{- range .Alerts }}
      {{ template "rancher.text_single" . }}
      {{- end }}
      {{- end -}}

      {{- define "rancher.text_single" -}}
      {{- if .Labels.alertname }}
      *[ALERT - {{ .Labels.alertname }}]*
      {{- else }}
      *[ALERT]*
      {{- end }}
      {{- if .Labels.severity }}
      *Severity:* `{{ .Labels.severity }}`
      {{- end }}
      {{- if .Labels.cluster }}
      *Cluster:* {{ .Labels.cluster }}
      {{- end }}
      {{- if .Annotations.summary }}
      *Summary:* {{ .Annotations.summary }}
      {{- end }}
      {{- if .Annotations.message }}
      *Message:* {{ .Annotations.message }}
      {{- end }}
      {{- if .Annotations.description }}
      *Description:* {{ .Annotations.description }}
      {{- end }}
      {{- if .Annotations.runbook_url }}
      *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:>
      {{- end }}
      {{- with .Labels }}
      {{- with .Remove (stringSlice "alertname" "severity" "cluster") }}
      {{- if gt (len .) 0 }}
      *Additional Labels:*
      {{- range .SortedPairs }}
      • *{{ .Name }}:* `{{ .Value }}`
      {{- end }}
      {{- end }}
      {{- end }}
      {{- end }}
      {{- with .Annotations }}
      {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }}
      {{- if gt (len .) 0 }}
      *Additional Annotations:*
      {{- range .SortedPairs }}
      • *{{ .Name }}:* `{{ .Value }}`
      {{- end }}
      {{- end }}
      {{- end }}
      {{- end }}
      {{- end -}}

  ingress:
    enabled: false

    annotations: {}

    labels: {}

    ## Hosts must be provided if Ingress is enabled.
    ##
    hosts: []
    # - alertmanager.domain.com

    ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
    ##
    paths: []
    # - /

    ## TLS configuration for Alertmanager Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: alertmanager-general-tls
    #   hosts:
    #   - alertmanager.example.com

  ## Configuration for Alertmanager secret
  ##
  secret:

    # Should the Alertmanager Config Secret be cleaned up on an uninstall?
    # This is set to false by default to prevent the loss of alerting configuration on an uninstall
    # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
    #
    cleanupOnUninstall: false

    # The image used to manage the Alertmanager Config Secret's lifecycle
    # Only used if Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false
    #
    image:
      repository: rancher/rancher-agent
      tag: v2.4.8
      pullPolicy: IfNotPresent

    securityContext:
      runAsNonRoot: true
      runAsUser: 1000

    annotations: {}

  ## Configuration for creating an Ingress that will map to each Alertmanager replica service
  ## alertmanager.servicePerReplica must be enabled
  ##
  ingressPerReplica:
    enabled: false
    annotations: {}
    labels: {}

    ## Final form of the hostname for each per replica ingress is
    ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
    ##
    ## Prefix for the per replica ingress that will have `-$replicaNumber`
    ## appended to the end
    hostPrefix: ""
    ## Domain that will be used for the per replica ingress
    hostDomain: ""

    ## Paths to use for ingress rules
    ##
    paths: []
    # - /

    ## Secret name containing the TLS certificate for alertmanager per replica ingress
    ## Secret must be manually created in the namespace
    tlsSecretName: ""

    ## Separated secret for each per replica Ingress. Can be used together with cert-manager
    ##
    tlsSecretPerReplica:
      enabled: false
      ## Final form of the secret for each per replica ingress is
      ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
      ##
      prefix: "alertmanager"

  ## Configuration for Alertmanager service
  ##
  service:
    annotations: {}
    labels: {}
    clusterIP: ""

    ## Port for Alertmanager Service to listen on
    ##
    port: 9093
    ## To be used with a proxy extraContainer port
    ##
    targetPort: 9093
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30903
    ## List of IP addresses at which the Prometheus server service is available
    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
    ##
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    ## Service type
    ##
    type: ClusterIP

  ## Configuration for creating a separate Service for each statefulset Alertmanager replica
  ##
  servicePerReplica:
    enabled: false
    annotations: {}

    ## Port for Alertmanager Service per replica to listen on
    ##
    port: 9093

    ## To be used with a proxy extraContainer port
    targetPort: 9093

    ## Port to expose on each node
    ## Only used if servicePerReplica.type is 'NodePort'
    ##
    nodePort: 30904

    ## Loadbalancer source IP ranges
    ## Only used if servicePerReplica.type is "loadbalancer"
    loadBalancerSourceRanges: []
    ## Service type
    ##
    type: ClusterIP

  ## If true, create a serviceMonitor for alertmanager
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  ## Settings affecting alertmanagerSpec
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
  ##
  alertmanagerSpec:
    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
    ##
    podMetadata: {}
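    ## For example (label and annotation values below are placeholders):
    # podMetadata:
    #   labels:
    #     team: monitoring
    #   annotations:
    #     example.com/owner: infra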

    ## Image of Alertmanager
    ##
    image:
      repository: rancher/prom-alertmanager
      tag: v0.21.0
      sha: ""

    ## If true then the user will be responsible to provide a secret with alertmanager configuration
    ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
    ##
    useExistingSecret: false

    ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
    ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
    ##
    secrets: []
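    ## For example (Secret names below are placeholders you would create yourself):
    # - alertmanager-slack-webhook
    # - alertmanager-pagerduty-key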

    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
    ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
    ##
    configMaps: []
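    ## For example (ConfigMap name below is a placeholder):
    # - alertmanager-extra-templates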

    ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
    ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
    ##
    # configSecret:

    ## Define Log Format
    # Use logfmt (default) or json-formatted logging
    logFormat: logfmt

    ## Log level for Alertmanager to be configured with.
    ##
    logLevel: info

    ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
    ## running cluster equal to the expected size.
    replicas: 1

    ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
    ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
    ##
    retention: 120h

    ## Storage is the definition of how storage will be used by the Alertmanager instances.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
    ##
    storage: {}
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}


    ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name.
    ##
    externalUrl:

    ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
    ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
    ##
    routePrefix: /

    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
    ##
    paused: false

    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}
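    ## For example (a standard well-known node label; purely illustrative):
    # nodeSelector:
    #   kubernetes.io/os: linux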

    ## Define resources requests and limits for single Pods.
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      limits:
        memory: 500Mi
        cpu: 1000m
      requests:
        memory: 100Mi
        cpu: 100m

    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
    ##
    podAntiAffinity: ""

    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
    ##
    podAntiAffinityTopologyKey: kubernetes.io/hostname

    ## Assign custom affinity rules to the alertmanager instance
    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #     - matchExpressions:
    #       - key: kubernetes.io/e2e-az-name
    #         operator: In
    #         values:
    #         - e2e-az1
    #         - e2e-az2

    ## If specified, the pod's tolerations.
    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to non root user with uid 1000 and gid 2000.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    ##
    securityContext:
      runAsGroup: 2000
      runAsNonRoot: true
      runAsUser: 1000
      fsGroup: 2000

    ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
    ## Note this is only for the Alertmanager UI, not the gossip communication.
    ##
    listenLocal: false

    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
    ##
    containers: []

    ## Priority class assigned to the Pods
    ##
    priorityClassName: ""

    ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
    ##
    additionalPeers: []
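    ## For example (hostnames below are placeholders for Alertmanagers outside this cluster):
    # - alertmanager.other-cluster.example.com:9094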

    ## PortName to use for Alert Manager.
    ##
    portName: "web"


## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
##
grafana:
  enabled: true
  namespaceOverride: ""

  ## Grafana's primary configuration
  ## NOTE: values in map will be converted to ini format
  ## ref: http://docs.grafana.org/installation/configuration/
  ##
  grafana.ini:
    users:
      auto_assign_org_role: Viewer
    auth:
      disable_login_form: false
    auth.anonymous:
      enabled: true
      org_role: Viewer
    auth.basic:
      enabled: false
    dashboards:
      # Modify this value to change the default dashboard shown on the main Grafana page
      default_home_dashboard_path: /tmp/dashboards/rancher-default-home.json

  deploymentStrategy:
    type: Recreate

  ## Deploy default dashboards.
  ##
  defaultDashboardsEnabled: true

  adminPassword: prom-operator

  ingress:
    ## If true, Grafana Ingress will be created
    ##
    enabled: false

    ## Annotations for Grafana Ingress
    ##
    annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"

    ## Labels to be added to the Ingress
    ##
    labels: {}

    ## Hostnames.
    ## Must be provided if Ingress is enabled.
    ##
    # hosts:
    #   - grafana.domain.com
    hosts: []

    ## Path for grafana ingress
    path: /

    ## TLS configuration for grafana Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: grafana-general-tls
    #   hosts:
    #   - grafana.example.com

  sidecar:
    dashboards:
      enabled: true
      label: grafana_dashboard
      searchNamespace: cattle-dashboards

      ## Annotations for Grafana dashboard configmaps
      ##
      annotations: {}
    datasources:
      enabled: true
      defaultDatasourceEnabled: true

      ## Annotations for Grafana datasource configmaps
      ##
      annotations: {}

      ## Create datasource for each Pod of Prometheus StatefulSet;
      ## this uses headless service `prometheus-operated` which is
      ## created by Prometheus Operator
      ## ref: https://git.io/fjaBS
      createPrometheusReplicasDatasources: false
      label: grafana_datasource

  extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/grafana/ssl/
  #   configMap: certs-configmap
  #   readOnly: true

  ## Configure additional grafana datasources (passed through tpl)
  ## ref: http://docs.grafana.org/administration/provisioning/#datasources
  additionalDataSources: []
  # - name: prometheus-sample
  #   access: proxy
  #   basicAuth: true
  #   basicAuthPassword: pass
  #   basicAuthUser: daco
  #   editable: false
  #   jsonData:
  #     tlsSkipVerify: true
  #   orgId: 1
  #   type: prometheus
  #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
  #   version: 1

  ## Passed to grafana subchart and used by servicemonitor below
  ##
  service:
    portName: nginx-http
    ## Port for Grafana Service to listen on
    ##
    port: 80
    ## To be used with a proxy extraContainer port
    ##
    targetPort: 8080
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30950
    ## Service type
    ##
    type: ClusterIP

  proxy:
    image:
      repository: rancher/library-nginx
      tag: 1.19.2-alpine

  ## Specify an additional container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
  extraContainers: |
    - name: grafana-proxy
      args:
      - nginx
      - -g
      - daemon off;
      - -c
      - /nginx/nginx.conf
      image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
      ports:
      - containerPort: 8080
        name: nginx-http
        protocol: TCP
      volumeMounts:
      - mountPath: /nginx
        name: grafana-nginx
      - mountPath: /var/cache/nginx
        name: nginx-home
      securityContext:
        runAsUser: 101
        runAsGroup: 101

  ## Volumes that can be used in containers
  extraContainerVolumes:
  - name: nginx-home
    emptyDir: {}
  - name: grafana-nginx
    configMap:
      name: grafana-nginx-proxy-config
      items:
      - key: nginx.conf
        mode: 438
        path: nginx.conf

  ## If true, create a serviceMonitor for grafana
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  resources:
    limits:
      memory: 200Mi
      cpu: 200m
    requests:
      memory: 100Mi
      cpu: 100m

## Component scraping the kube api server
##
kubeApiServer:
  enabled: true
  tlsConfig:
    serverName: kubernetes
    insecureSkipVerify: false

  ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service
  ##
  relabelings: []
  # - sourceLabels:
  #   - __meta_kubernetes_namespace
  #   - __meta_kubernetes_service_name
  #   - __meta_kubernetes_endpoint_port_name
  #   action: keep
  #   regex: default;kubernetes;https
  # - targetLabel: __address__
  #   replacement: kubernetes.default.svc:443

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    jobLabel: component
    selector:
      matchLabels:
        component: apiserver
        provider: kubernetes

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

## Component scraping the kubelet and kubelet-hosted cAdvisor
##
kubelet:
  enabled: true
  namespace: kube-system

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## Enable scraping the kubelet over https. For requirements to enable this see
    ## https://github.com/prometheus-operator/prometheus-operator/issues/926
    ##
    https: true

    ## Enable scraping /metrics/cadvisor from kubelet's service
    ##
    cAdvisor: true

    ## Enable scraping /metrics/probes from kubelet's service
    ##
    probes: true

    ## Enable scraping /metrics/resource from kubelet's service
    ##
    resource: true
    # From kubernetes 1.18, /metrics/resource/v1alpha1 was renamed to /metrics/resource
    resourcePath: "/metrics/resource/v1alpha1"
    ## Metric relabellings to apply to samples before ingestion
    ##
    cAdvisorMetricRelabelings: []
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    ## Metric relabellings to apply to samples before ingestion
    ##
    probesMetricRelabelings: []
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    # relabel configs to apply to samples before ingestion.
    # metrics_path is required to match upstream rules and charts
    ##
    cAdvisorRelabelings:
    - sourceLabels: [__metrics_path__]
      targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    probesRelabelings:
    - sourceLabels: [__metrics_path__]
      targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    resourceRelabelings:
    - sourceLabels: [__metrics_path__]
      targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    metricRelabelings: []
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    # relabel configs to apply to samples before ingestion.
    # metrics_path is required to match upstream rules and charts
    ##
    relabelings:
    - sourceLabels: [__metrics_path__]
      targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Component scraping the kube controller manager
##
kubeControllerManager:
  enabled: false

  ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeControllerManager.endpoints only the port and targetPort are used
  ##
  service:
    port: 10252
    targetPort: 10252
    # selector:
    #   component: kube-controller-manager

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## Enable scraping kube-controller-manager over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false

    # Skip TLS certificate validation when scraping
    insecureSkipVerify: null

    # Name of the server to use when validating TLS certificate
    serverName: null

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Component scraping coreDns. Use either this or kubeDns
##
coreDns:
  enabled: true
  service:
    port: 9153
    targetPort: 9153
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Component scraping kubeDns. Use either this or coreDns
##
kubeDns:
  enabled: false
  service:
    dnsmasq:
      port: 10054
      targetPort: 10054
    skydns:
      port: 10055
      targetPort: 10055
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace
    dnsmasqMetricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    dnsmasqRelabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Component scraping etcd
##
kubeEtcd:
  enabled: false

  ## If your etcd is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
  ##
  service:
    port: 2379
    targetPort: 2379
    # selector:
    #   component: etcd

  ## Configure secure access to the etcd cluster by loading a secret into prometheus and
  ## specifying security configuration below. For example, with a secret named etcd-client-cert
  ##
  ## serviceMonitor:
  ##   scheme: https
  ##   insecureSkipVerify: false
  ##   serverName: localhost
  ##   caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
  ##   certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
  ##   keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    scheme: http
    insecureSkipVerify: false
    serverName: ""
    caFile: ""
    certFile: ""
    keyFile: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace


## Component scraping kube scheduler
##
kubeScheduler:
  enabled: false

  ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeScheduler.endpoints only the port and targetPort are used
  ##
  service:
    port: 10251
    targetPort: 10251
    # selector:
    #   component: kube-scheduler

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## Enable scraping kube-scheduler over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false

    ## Skip TLS certificate validation when scraping
    insecureSkipVerify: null

    ## Name of the server to use when validating TLS certificate
    serverName: null

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace


## Component scraping kube proxy
##
kubeProxy:
  enabled: false

  ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  service:
    port: 10249
    targetPort: 10249
    # selector:
    #   k8s-app: kube-proxy

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## Enable scraping kube-proxy over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]


## Component scraping kube state metrics
##
kubeStateMetrics:
  enabled: true
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Configuration for kube-state-metrics subchart
##
kube-state-metrics:
  namespaceOverride: ""
  rbac:
    create: true
  podSecurityPolicy:
    enabled: true
  resources:
    limits:
      cpu: 100m
      memory: 200Mi
    requests:
      cpu: 100m
      memory: 130Mi

## Deploy node exporter as a daemonset to all nodes
##
nodeExporter:
  enabled: true

  ## Use the value configured in prometheus-node-exporter.podLabels
  ##
  jobLabel: jobLabel

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""

    ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
    ##
    scrapeTimeout: ""

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
    #   replacement: $1
    #   action: drop

    ## relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Configuration for prometheus-node-exporter subchart
##
prometheus-node-exporter:
  namespaceOverride: ""
  podLabels:
    ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
    ##
    jobLabel: node-exporter
  extraArgs:
  - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
  - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
  service:
    port: 9796
    targetPort: 9796
  resources:
    limits:
      cpu: 200m
      memory: 50Mi
    requests:
      cpu: 100m
      memory: 30Mi

## Manages Prometheus and Alertmanager components
##
prometheusOperator:
  enabled: true

  # If true prometheus operator will create and update its CRDs on startup
  # Only for prometheusOperator.image.tag < v0.39.0
  manageCrds: true

  tlsProxy:
    enabled: true
    image:
      repository: rancher/squareup-ghostunnel
      tag: v1.5.2
      sha: ""
      pullPolicy: IfNotPresent
    resources: {}

  ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
  ## rules from making their way into prometheus and potentially preventing the container from starting
  admissionWebhooks:
    failurePolicy: Fail
    enabled: true
    ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
    ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
    ## certs ahead of time if you wish.
    ##
    patch:
      enabled: true
      image:
        repository: rancher/jettech-kube-webhook-certgen
        tag: v1.2.1
        sha: ""
        pullPolicy: IfNotPresent
      resources: {}
      ## Provide a priority class name to the webhook patching job
      ##
      priorityClassName: ""
      podAnnotations: {}
      nodeSelector: {}
      affinity: {}
      tolerations: []

  ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
  ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
  ##
  namespaces: {}
    # releaseNamespace: true
    # additional:
    # - kube-system

  ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
  ##
  denyNamespaces: []
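  ## For example (namespace names below are illustrative):
  # - kube-public
  # - dev-sandbox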

  ## Service account for Prometheus Operator to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""

  ## Configuration for Prometheus operator service
  ##
  service:
    annotations: {}
    labels: {}
    clusterIP: ""

    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30080

    nodePortTls: 30443

    ## Additional ports to open for Prometheus service
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
    ##
    additionalPorts: []
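    ## For example (a hypothetical extra port; the name and numbers are placeholders):
    # - name: extra-metrics
    #   port: 8081
    #   targetPort: 8081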

    ## Loadbalancer IP
    ## Only use if service.type is "loadbalancer"
    ##
    loadBalancerIP: ""
    loadBalancerSourceRanges: []

    ## Service type
    ## NodePort, ClusterIP, loadbalancer
    ##
    type: ClusterIP

    ## List of IP addresses at which the Prometheus server service is available
    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
    ##
    externalIPs: []

  ## Deploy CRDs used by Prometheus Operator.
  ##
  createCustomResource: true

  ## Attempt to clean up CRDs created by Prometheus Operator.
  ##
  cleanupCustomResource: false

  ## Labels to add to the operator pod
  ##
  podLabels: {}

  ## Annotations to add to the operator pod
  ##
  podAnnotations: {}

  ## Assign a PriorityClassName to pods if set
  # priorityClassName: ""

  ## Define Log Format
  # Use logfmt (default) or json-formatted logging
  # logFormat: logfmt

  ## Decrease log verbosity to errors only
  # logLevel: error

  ## If true, the operator will create and maintain a service for scraping kubelets
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/helm/prometheus-operator/README.md
  ##
  kubeletService:
    enabled: true
    namespace: kube-system

  ## Create a servicemonitor for the operator
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
    scrapeTimeout: ""
    selfMonitor: true

    ## metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    # relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  ## Resource limits & requests
  ##
  resources:
    limits:
      cpu: 200m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 100Mi

  # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
  # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
  ##
  hostNetwork: false

  ## Define which Nodes the Pods are scheduled on.
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}

  ## Tolerations for use with node taints
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"

  ## Assign custom affinity rules to the prometheus operator
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  ##
  affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #     - matchExpressions:
  #       - key: kubernetes.io/e2e-az-name
  #         operator: In
  #         values:
  #         - e2e-az1
  #         - e2e-az2

  securityContext:
    fsGroup: 65534
    runAsGroup: 65534
    runAsNonRoot: true
    runAsUser: 65534

  ## Prometheus-operator image
  ##
  image:
    repository: rancher/coreos-prometheus-operator
    tag: v0.38.1
    sha: ""
    pullPolicy: IfNotPresent

  ## Configmap-reload image to use for reloading configmaps
  ##
  configmapReloadImage:
    repository: rancher/jimmidyson-configmap-reload
    tag: v0.3.0
    sha: ""

  ## Prometheus-config-reloader image to use for config and rule reloading
  ##
  prometheusConfigReloaderImage:
    repository: rancher/coreos-prometheus-config-reloader
    tag: v0.38.1
    sha: ""

  ## Set the prometheus config reloader side-car CPU limit
  ##
  configReloaderCpu: 100m

  ## Set the prometheus config reloader side-car memory limit
  ##
  configReloaderMemory: 25Mi

  ## Set a Field Selector to filter watched secrets
  ##
  secretFieldSelector: ""
|
||
|
||
## Deploy a Prometheus instance
|
||
##
|
||
prometheus:
|
||
|
||
enabled: true
|
||
|
||
## Annotations for Prometheus
|
||
##
|
||
annotations: {}
|
||
|
||
## Service account for Prometheuses to use.
|
||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||
##
|
||
serviceAccount:
|
||
create: true
|
||
name: ""
|
||
|
||
## Configuration for Prometheus service
|
||
##
|
||
service:
|
||
annotations: {}
|
||
labels: {}
|
||
clusterIP: ""
|
||
|
||
## Port for Prometheus Service to listen on
|
||
##
|
||
port: 9090
|
||
|
||
## To be used with a proxy extraContainer port
|
||
targetPort: 8080
|
||
|
||
## List of IP addresses at which the Prometheus server service is available
|
||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||
##
|
||
externalIPs: []
|
||
|
||
## Port to expose on each node
|
||
## Only used if service.type is 'NodePort'
|
||
##
|
||
nodePort: 30090
|
||
|
||
## Loadbalancer IP
|
||
## Only use if service.type is "loadbalancer"
|
||
loadBalancerIP: ""
|
||
loadBalancerSourceRanges: []
|
||
## Service type
|
||
##
|
||
type: ClusterIP
|
||
|
||
sessionAffinity: ""
|
||
|
||
## Configuration for creating a separate Service for each statefulset Prometheus replica
|
||
##
|
||
servicePerReplica:
|
||
enabled: false
|
||
annotations: {}
|
||
|
||
## Port for Prometheus Service per replica to listen on
|
||
##
|
||
port: 9090
|
||
|
||
## To be used with a proxy extraContainer port
|
||
targetPort: 9090
|
||
|
||
## Port to expose on each node
|
||
## Only used if servicePerReplica.type is 'NodePort'
|
||
##
|
||
nodePort: 30091
|
||
|
||
## Loadbalancer source IP ranges
|
||
## Only used if servicePerReplica.type is "loadbalancer"
|
||
loadBalancerSourceRanges: []
|
||
## Service type
|
||
##
|
||
type: ClusterIP
|
||
|
||
## Configure pod disruption budgets for Prometheus
|
||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
|
||
## This configuration is immutable once created and will require the PDB to be deleted to be changed
|
||
## https://github.com/kubernetes/kubernetes/issues/45398
|
||
##
|
||
podDisruptionBudget:
|
||
enabled: false
|
||
minAvailable: 1
|
||
maxUnavailable: ""

  # Ingress exposes the Thanos sidecar outside the cluster
  thanosIngress:
    enabled: false
    annotations: {}
    labels: {}
    servicePort: 10901
    ## Hosts must be provided if Ingress is enabled.
    ##
    hosts: []
    # - thanos-gateway.domain.com

    ## Paths to use for ingress rules
    ##
    paths: []
    # - /

    ## TLS configuration for Thanos Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: thanos-gateway-tls
    #   hosts:
    #   - thanos-gateway.domain.com

  ingress:
    enabled: false
    annotations: {}
    labels: {}

    ## Hostnames.
    ## Must be provided if Ingress is enabled.
    ##
    # hosts:
    #   - prometheus.domain.com
    hosts: []

    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
    ##
    paths: []
    # - /

    ## TLS configuration for Prometheus Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: prometheus-general-tls
    #   hosts:
    #   - prometheus.example.com

  ## Configuration for creating an Ingress that will map to each Prometheus replica service
  ## prometheus.servicePerReplica must be enabled
  ##
  ingressPerReplica:
    enabled: false
    annotations: {}
    labels: {}

    ## Final form of the hostname for each per replica ingress is
    ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
    ##
    ## Prefix for the per replica ingress that will have `-$replicaNumber`
    ## appended to the end
    hostPrefix: ""
    ## Domain that will be used for the per replica ingress
    hostDomain: ""
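
    ## For example (illustrative values), hostPrefix "prometheus" and
    ## hostDomain "example.com" yield per-replica hostnames such as
    ## prometheus-0.example.com and prometheus-1.example.com:
    # hostPrefix: "prometheus"
    # hostDomain: "example.com"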

    ## Paths to use for ingress rules
    ##
    paths: []
    # - /

    ## Secret name containing the TLS certificate for Prometheus per replica ingress
    ## Secret must be manually created in the namespace
    tlsSecretName: ""

    ## A separate secret for each per replica Ingress. Can be used together with cert-manager
    ##
    tlsSecretPerReplica:
      enabled: false
      ## Final form of the secret for each per replica ingress is
      ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
      ##
      prefix: "prometheus"

  ## Configure additional options for default pod security policy for Prometheus
  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
  podSecurityPolicy:
    allowedCapabilities: []

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
    scheme: ""

    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    tlsConfig: {}
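
    ## For example (illustrative paths; assumes istio workload certificates are
    ## mounted at /etc/prom-certs, which is an assumption, not a chart default):
    # tlsConfig:
    #   caFile: /etc/prom-certs/root-cert.pem
    #   certFile: /etc/prom-certs/cert-chain.pem
    #   keyFile: /etc/prom-certs/key.pem
    #   insecureSkipVerify: true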

    bearerTokenFile:

    ## Metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## Relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  ## Settings affecting prometheusSpec
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
  ##
  prometheusSpec:
    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
    ##
    disableCompaction: false
    ## APIServerConfig
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig
    ##
    apiserverConfig: {}

    ## Interval between consecutive scrapes.
    ##
    scrapeInterval: ""

    ## Interval between consecutive evaluations.
    ##
    evaluationInterval: ""

    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
    ##
    listenLocal: false

    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
    ## This is disabled by default.
    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
    ##
    enableAdminAPI: false

    ## Image of Prometheus.
    ##
    image:
      repository: rancher/prom-prometheus
      tag: v2.18.2
      sha: ""

    ## Tolerations for use with node taints
    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## Alertmanagers to which alerts will be sent
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
    ##
    ## Default configuration will connect to the alertmanager deployed as part of this release
    ##
    alertingEndpoints: []
    # - name: ""
    #   namespace: ""
    #   port: http
    #   scheme: http
    #   pathPrefix: ""
    #   tlsConfig: {}
    #   bearerTokenFile: ""
    #   apiVersion: v2

    ## External labels to add to any time series or alerts when communicating with external systems
    ##
    externalLabels: {}
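
    ## For example (illustrative label name and value), to identify this
    ## cluster in federation or remote-write setups:
    # externalLabels:
    #   cluster: my-cluster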

    ## Name of the external label used to denote replica name
    ##
    replicaExternalLabelName: ""

    ## If true, the Operator won't add the external label used to denote replica name
    ##
    replicaExternalLabelNameClear: false

    ## Name of the external label used to denote Prometheus instance name
    ##
    prometheusExternalLabelName: ""

    ## If true, the Operator won't add the external label used to denote Prometheus instance name
    ##
    prometheusExternalLabelNameClear: false

    ## External URL at which Prometheus will be reachable.
    ##
    externalUrl: ""

    ## Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs
    ## If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into
    ##
    ignoreNamespaceSelectors: false

    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}

    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
    ## with the new list of secrets.
    ##
    secrets: []
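
    ## For example (reusing the secret name from the etcd scrape example
    ## further below), to mount an etcd client certificate into the pods:
    # secrets:
    #   - etcd-client-cert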

    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
    ##
    configMaps: []

    ## QuerySpec defines the query command line flags when starting Prometheus.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec
    ##
    query: {}
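
    ## For example (illustrative values; field names follow the QuerySpec
    ## reference above):
    # query:
    #   maxConcurrency: 20
    #   timeout: 2m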

    ## Namespaces to be selected for PrometheusRules discovery.
    ## If nil, select own namespace.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
    ##
    ruleNamespaceSelector: {}
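
    ## For example (illustrative label), to discover rules only in namespaces
    ## labelled monitoring: enabled:
    # ruleNamespaceSelector:
    #   matchLabels:
    #     monitoring: enabled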

    ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the PrometheusRule resources created
    ##
    ruleSelectorNilUsesHelmValues: false

    ## PrometheusRules to be selected for target discovery.
    ## If {}, select all PrometheusRules
    ##
    ruleSelector: {}
    ## Example which selects all prometheusrules resources
    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
    # ruleSelector:
    #   matchExpressions:
    #     - key: prometheus
    #       operator: In
    #       values:
    #         - example-rules
    #         - example-rules-2
    #
    ## Example which selects all prometheusrules resources with label "role" set to "example-rules"
    # ruleSelector:
    #   matchLabels:
    #     role: example-rules

    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the servicemonitors created
    ##
    serviceMonitorSelectorNilUsesHelmValues: false

    ## ServiceMonitors to be selected for target discovery.
    ## If {}, select all ServiceMonitors
    ##
    serviceMonitorSelector: {}
    ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
    # serviceMonitorSelector:
    #   matchLabels:
    #     prometheus: somelabel

    ## Namespaces to be selected for ServiceMonitor discovery.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
    ##
    serviceMonitorNamespaceSelector: {}

    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the podmonitors created
    ##
    podMonitorSelectorNilUsesHelmValues: false

    ## PodMonitors to be selected for target discovery.
    ## If {}, select all PodMonitors
    ##
    podMonitorSelector: {}
    ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
    # podMonitorSelector:
    #   matchLabels:
    #     prometheus: somelabel

    ## Namespaces to be selected for PodMonitor discovery.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
    ##
    podMonitorNamespaceSelector: {}

    ## How long to retain metrics
    ##
    retention: 10d

    ## Maximum size of metrics
    ##
    retentionSize: ""
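
    ## For example (illustrative size), to cap on-disk TSDB usage; Prometheus
    ## accepts size suffixes such as MB, GB and TB:
    # retentionSize: "50GB"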

    ## Enable compression of the write-ahead log using Snappy.
    ##
    walCompression: false

    ## If true, the Operator won't process any Prometheus configuration changes
    ##
    paused: false

    ## Number of Prometheus replicas desired
    ##
    replicas: 1

    ## Log level for Prometheus to be configured with
    ##
    logLevel: info

    ## Log format for Prometheus to be configured with
    ##
    logFormat: logfmt

    ## Prefix used to register routes, overriding externalUrl route.
    ## Useful for proxies that rewrite URLs.
    ##
    routePrefix: /

    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations get propagated to the prometheus pods.
    ##
    podMetadata: {}
    # labels:
    #   app: prometheus
    #   k8s-app: prometheus

    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
    podAntiAffinity: ""

    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
    ##
    podAntiAffinityTopologyKey: kubernetes.io/hostname
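
    ## For example (illustrative), to require replicas to land in different
    ## availability zones rather than merely on different nodes:
    # podAntiAffinity: "hard"
    # podAntiAffinityTopologyKey: failure-domain.beta.kubernetes.io/zone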

    ## Assign custom affinity rules to the prometheus instance
    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #     - matchExpressions:
    #       - key: kubernetes.io/e2e-az-name
    #         operator: In
    #         values:
    #         - e2e-az1
    #         - e2e-az2

    ## The remote_read spec configuration for Prometheus.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
    remoteRead: []
    # - url: http://remote1/read

    ## The remote_write spec configuration for Prometheus.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
    remoteWrite: []
    # - url: http://remote1/push
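
    ## For example (illustrative endpoint and Secret names), a remote write
    ## target authenticated with basic auth credentials stored in a Secret:
    # - url: http://remote1/push
    #   basicAuth:
    #     username:
    #       name: remote-write-auth
    #       key: username
    #     password:
    #       name: remote-write-auth
    #       key: password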

    ## Enable/Disable Grafana dashboards provisioning for the Prometheus remote write feature
    remoteWriteDashboards: false

    ## Resource limits & requests
    ##
    resources:
      limits:
        memory: 1500Mi
        cpu: 1000m
      requests:
        memory: 750Mi
        cpu: 750m

    ## Prometheus StorageSpec for persistent data
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
    ##
    storageSpec: {}
    #  volumeClaimTemplate:
    #    spec:
    #      storageClassName: gluster
    #      accessModes: ["ReadWriteOnce"]
    #      resources:
    #        requests:
    #          storage: 50Gi
    #    selector: {}

    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
    ## as specified in the official Prometheus documentation:
    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
    ## appended, the user is responsible for making sure they are valid. Note that using this feature may expose the possibility
    ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
    ## scrape configs are going to break Prometheus after the upgrade.
    ##
    ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
    ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
    ##
    additionalScrapeConfigs: []
    # - job_name: kube-etcd
    #   kubernetes_sd_configs:
    #     - role: node
    #   scheme: https
    #   tls_config:
    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
    #   relabel_configs:
    #   - action: labelmap
    #     regex: __meta_kubernetes_node_label_(.+)
    #   - source_labels: [__address__]
    #     action: replace
    #     target_label: __address__
    #     regex: ([^:;]+):(\d+)
    #     replacement: ${1}:2379
    #   - source_labels: [__meta_kubernetes_node_name]
    #     action: keep
    #     regex: .*mst.*
    #   - source_labels: [__meta_kubernetes_node_name]
    #     action: replace
    #     target_label: node
    #     regex: (.*)
    #     replacement: ${1}
    #   metric_relabel_configs:
    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
    #     action: labeldrop

    ## If additional scrape configurations are already deployed in a single secret file you can use this section.
    ## Expected values are the secret name and key
    ## Cannot be used with additionalScrapeConfigs
    additionalScrapeConfigsSecret: {}
    # enabled: false
    # name:
    # key:

    ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
    ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
    additionalPrometheusSecretsAnnotations: {}

    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
    ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
    ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
    ##
    additionalAlertManagerConfigs: []
    # - consul_sd_configs:
    #   - server: consul.dev.test:8500
    #     scheme: http
    #     datacenter: dev
    #     tag_separator: ','
    #     services:
    #       - metrics-prometheus-alertmanager

    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
    ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may expose the
    ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
    ## configs are going to break Prometheus after the upgrade.
    ##
    additionalAlertRelabelConfigs: []
    # - separator: ;
    #   regex: prometheus_replica
    #   replacement: $1
    #   action: labeldrop

    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to a non-root user with uid 1000 and gid 2000.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md
    ##
    securityContext:
      runAsGroup: 2000
      runAsNonRoot: true
      runAsUser: 1000
      fsGroup: 2000

    ## Priority class assigned to the Pods
    ##
    priorityClassName: ""

    ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
    ## This section is experimental and may change significantly without backward compatibility in any release.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#thanosspec
    ##
    thanos: {}
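
    ## For example (illustrative Secret name and key, not chart defaults),
    ## enabling the sidecar with object storage configured from a Secret:
    # thanos:
    #   objectStorageConfig:
    #     name: thanos-objstore-config
    #     key: objstore.yml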

    proxy:
      image:
        repository: rancher/library-nginx
        tag: 1.19.2-alpine

    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
    ## If using the proxy extraContainer, update targetPort with the proxy container port.
    containers: |
      - name: prometheus-proxy
        args:
          - nginx
          - -g
          - daemon off;
          - -c
          - /nginx/nginx.conf
        image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}"
        ports:
          - containerPort: 8080
            name: nginx-http
            protocol: TCP
        volumeMounts:
          - mountPath: /nginx
            name: prometheus-nginx
          - mountPath: /var/cache/nginx
            name: nginx-home
        securityContext:
          runAsUser: 101
          runAsGroup: 101

    # Additional volumes on the output StatefulSet definition.
    volumes:
      - name: nginx-home
        emptyDir: {}
      - name: prometheus-nginx
        configMap:
          name: prometheus-nginx-proxy-config
          defaultMode: 438

    # Additional VolumeMounts on the output StatefulSet definition.
    volumeMounts: []

    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting prometheus
    initContainers: []

    ## PortName to use for Prometheus.
    ##
    portName: "nginx-http"

  additionalServiceMonitors: []
  ## Name of the ServiceMonitor to create
  ##
  # - name: ""

  ## Additional labels to set, used for the ServiceMonitorSelector together with the standard labels from
  ## the chart
  ##
  #   additionalLabels: {}

  ## Service label for use in assembling a job name of the form <label value>-<port>
  ## If no label is specified, the service name is used.
  ##
  #   jobLabel: ""

  ## Labels to transfer from the Kubernetes service to the target
  ##
  #   targetLabels: ""

  ## Label selector for services to which this ServiceMonitor applies
  ##
  #   selector: {}

  ## Namespaces from which services are selected
  ##
  #   namespaceSelector:
  ## Match any namespace
  ##
  #     any: false

  ## Explicit list of namespace names to select
  ##
  #     matchNames: []

  ## Endpoints of the selected service to be monitored
  ##
  #   endpoints: []
  ## Name of the endpoint's service port
  ## Mutually exclusive with targetPort
  #   - port: ""

  ## Name or number of the endpoint's target port
  ## Mutually exclusive with port
  #   - targetPort: ""

  ## File containing bearer token to be used when scraping targets
  ##
  #     bearerTokenFile: ""

  ## Interval at which metrics should be scraped
  ##
  #     interval: 30s

  ## HTTP path to scrape for metrics
  ##
  #     path: /metrics

  ## HTTP scheme to use for scraping
  ##
  #     scheme: http

  ## TLS configuration to use when scraping the endpoint
  ##
  #     tlsConfig:

  ## Path to the CA file
  ##
  #       caFile: ""

  ## Path to client certificate file
  ##
  #       certFile: ""

  ## Skip certificate verification
  ##
  #       insecureSkipVerify: false

  ## Path to client key file
  ##
  #       keyFile: ""

  ## Server name used to verify host name
  ##
  #       serverName: ""

  additionalPodMonitors: []
  ## Name of the PodMonitor to create
  ##
  # - name: ""

  ## Additional labels to set, used for the PodMonitorSelector together with the standard labels from
  ## the chart
  ##
  #   additionalLabels: {}

  ## Pod label for use in assembling a job name of the form <label value>-<port>
  ## If no label is specified, the pod endpoint name is used.
  ##
  #   jobLabel: ""

  ## Label selector for pods to which this PodMonitor applies
  ##
  #   selector: {}

  ## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
  ##
  #   podTargetLabels: {}

  ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  ##
  #   sampleLimit: 0

  ## Namespaces from which pods are selected
  ##
  #   namespaceSelector:
  ## Match any namespace
  ##
  #     any: false

  ## Explicit list of namespace names to select
  ##
  #     matchNames: []

  ## Endpoints of the selected pods to be monitored
  ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmetricsendpoint
  ##
  #   podMetricsEndpoints: []