# Default values for project-prometheus-stack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Rancher Project Monitoring Configuration

## Provide a name in place of project-prometheus-stack for `app:` labels
## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
nameOverride: "rancher-project-monitoring"

## Override the deployment namespace
## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url
##
namespaceOverride: ""

## Provide a k8s version to the auto dashboard import script. Example: kubeTargetVersionOverride: 1.16.6
##
kubeTargetVersionOverride: ""

## Allow kubeVersion to be overridden while creating the ingress
##
kubeVersionOverride: ""

## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""

## Labels to apply to all resources
##
commonLabels: {}
# scmhash: abc123
# myLabel: aakkmd

## Create default rules for monitoring the cluster
##
defaultRules:
  create: true
  rules:
    general: true
    prometheus: true
    alertmanager: true
    kubernetesApps: true
    kubernetesStorage: true

  ## Runbook url prefix for default rules
  runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#
  ## Reduce app namespace alert scope
  appNamespacesTarget: ".*"

  ## Labels for default rules
  labels: {}
  ## Annotations for default rules
  annotations: {}

  ## Additional labels for PrometheusRule alerts
  additionalRuleLabels: {}
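  ## For example (hypothetical values), extra labels to attach to every generated alert:
  # additionalRuleLabels:
  #   team: project-a
  #   environment: dev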

##
global:
  cattle:
    systemDefaultRegistry: ""
    projectNamespaceSelector: {}
    projectNamespaces: []
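    ## For example (hypothetical namespace names), the project's namespaces could be listed explicitly:
    # projectNamespaces:
    # - project-app-namespace-1
    # - project-app-namespace-2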
  kubectl:
    repository: rancher/kubectl
    tag: v1.20.2
    pullPolicy: IfNotPresent
  rbac:
    ## Create RBAC resources for ServiceAccounts and users
    ##
    create: true

    userRoles:
      ## Create default user Roles that the Helm Project Operator will automatically create RoleBindings for
      ##
      ## How does this work?
      ##
      ## The operator watches all subjects bound to each Kubernetes default ClusterRole in the project registration namespace
      ## that contains the ProjectHelmChart which deployed this chart; if it observes a subject bound to a particular role in
      ## the project registration namespace (e.g. edit) and a Role deployed by this chart exists with the label
      ## 'helm.cattle.io/project-helm-chart-role-aggregate-from': '<role, e.g. edit>', it will automatically create a RoleBinding
      ## in the release namespace binding all such subjects to that Role.
      ##
      ## Note: while the default behavior is to use the Kubernetes default ClusterRoles, the operator deployment (prometheus-federator)
      ## can be configured to use a different set of ClusterRoles as the source of truth for admin, edit, and view permissions.
      ##
      create: true
      ## Add labels to Roles
      aggregateToDefaultRoles: true
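      ## For illustration only: a Role deployed by this chart for the "edit" permission level carries a
      ## label of the form below, which the operator matches on when creating RoleBindings:
      # metadata:
      #   labels:
      #     helm.cattle.io/project-helm-chart-role-aggregate-from: edit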

    pspEnabled: true
    pspAnnotations: {}
      ## Specify pod annotations
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
      ##
      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'

  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"

federate:
  ## enabled indicates whether to add federation to any Project Prometheus Stacks by default
  ## If not enabled, no federation will be turned on
  enabled: true

  # Change this to point at all Prometheuses you want all your Project Prometheus Stacks to federate from
  # By default, this matches the default deployment of Rancher Monitoring
  targets:
  - rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090

  ## Scrape interval
  interval: "15s"
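  ## For example (hypothetical second service), to federate from more than one Prometheus,
  ## list every source here:
  # targets:
  # - rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090
  # - other-prometheus.other-namespace.svc:9090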

## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:

  ## Deploy alertmanager
  ##
  enabled: true

  ## Annotations for Alertmanager
  ##
  annotations: {}

  ## API version that Prometheus will use to communicate with Alertmanager. Possible values are v1, v2
  ##
  apiVersion: v2

  ## Service account for Alertmanager to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""
    annotations: {}

  ## Configure pod disruption budgets for Alertmanager
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
  ## https://github.com/kubernetes/kubernetes/issues/45398
  ##
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable: ""

  ## Alertmanager configuration directives
  ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
  ## https://prometheus.io/webtools/alerting/routing-tree-editor/
  ##
  config:
    global:
      resolve_timeout: 5m
    route:
      group_by: ['job']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 12h
      receiver: 'null'
      routes:
      - match:
          alertname: Watchdog
        receiver: 'null'
    receivers:
    - name: 'null'
    templates:
    - '/etc/alertmanager/config/*.tmpl'
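
  ## As an illustration only (hypothetical webhook URL and channel), a Slack receiver that uses the
  ## "slack.rancher.text" template defined in templateFiles below could be configured like this:
  # config:
  #   receivers:
  #   - name: 'slack-notifications'
  #     slack_configs:
  #     - api_url: https://hooks.slack.com/services/REPLACE/ME
  #       channel: '#alerts'
  #       send_resolved: true
  #       text: '{{ template "slack.rancher.text" . }}'
  #   route:
  #     receiver: 'slack-notifications'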

  ## Pass the Alertmanager configuration directives through Helm's templating
  ## engine. If the Alertmanager configuration contains Alertmanager templates,
  ## they'll need to be properly escaped so that they are not interpreted by
  ## Helm
  ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
  ## https://prometheus.io/docs/alerting/configuration/#tmpl_string
  ## https://prometheus.io/docs/alerting/notifications/
  ## https://prometheus.io/docs/alerting/notification_examples/
  tplConfig: false

  ## Alertmanager template files to format alerts
  ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
  ## they have a .tmpl file suffix will be loaded. See config.templates above
  ## to change, add other suffixes. If adding other suffixes, be sure to update
  ## config.templates above to include those suffixes.
  ## ref: https://prometheus.io/docs/alerting/notifications/
  ## https://prometheus.io/docs/alerting/notification_examples/
  ##
  templateFiles:
    rancher_defaults.tmpl: |-
      {{- define "slack.rancher.text" -}}
      {{ template "rancher.text_multiple" . }}
      {{- end -}}

      {{- define "rancher.text_multiple" -}}
      *[GROUP - Details]*
      One or more alarms in this group have triggered a notification.

      {{- if gt (len .GroupLabels.Values) 0 }}
      *Group Labels:*
        {{- range .GroupLabels.SortedPairs }}
        • *{{ .Name }}:* `{{ .Value }}`
        {{- end }}
      {{- end }}
      {{- if .ExternalURL }}
      *Link to AlertManager:* {{ .ExternalURL }}
      {{- end }}

      {{- range .Alerts }}
      {{ template "rancher.text_single" . }}
      {{- end }}
      {{- end -}}

      {{- define "rancher.text_single" -}}
      {{- if .Labels.alertname }}
      *[ALERT - {{ .Labels.alertname }}]*
      {{- else }}
      *[ALERT]*
      {{- end }}
      {{- if .Labels.severity }}
      *Severity:* `{{ .Labels.severity }}`
      {{- end }}
      {{- if .Labels.cluster }}
      *Cluster:* {{ .Labels.cluster }}
      {{- end }}
      {{- if .Annotations.summary }}
      *Summary:* {{ .Annotations.summary }}
      {{- end }}
      {{- if .Annotations.message }}
      *Message:* {{ .Annotations.message }}
      {{- end }}
      {{- if .Annotations.description }}
      *Description:* {{ .Annotations.description }}
      {{- end }}
      {{- if .Annotations.runbook_url }}
      *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:>
      {{- end }}
      {{- with .Labels }}
      {{- with .Remove (stringSlice "alertname" "severity" "cluster") }}
      {{- if gt (len .) 0 }}
      *Additional Labels:*
        {{- range .SortedPairs }}
        • *{{ .Name }}:* `{{ .Value }}`
        {{- end }}
      {{- end }}
      {{- end }}
      {{- end }}
      {{- with .Annotations }}
      {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }}
      {{- if gt (len .) 0 }}
      *Additional Annotations:*
        {{- range .SortedPairs }}
        • *{{ .Name }}:* `{{ .Value }}`
        {{- end }}
      {{- end }}
      {{- end }}
      {{- end }}
      {{- end -}}

  ingress:
    enabled: false

    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
    # ingressClassName: nginx

    annotations: {}

    labels: {}

    ## Hosts must be provided if Ingress is enabled.
    ##
    hosts: []
    # - alertmanager.domain.com

    ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
    ##
    paths: []
    # - /

    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
    # pathType: ImplementationSpecific

    ## TLS configuration for Alertmanager Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: alertmanager-general-tls
    #   hosts:
    #   - alertmanager.example.com

  ## Configuration for Alertmanager secret
  ##
  secret:
    annotations: {}

  ## Configuration for Alertmanager service
  ##
  service:
    annotations: {}
    labels: {}
    clusterIP: ""

    ## Port for Alertmanager Service to listen on
    ##
    port: 9093
    ## To be used with a proxy extraContainer port
    ##
    targetPort: 9093
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30903
    ## List of IP addresses at which the Alertmanager service is available
    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
    ##

    ## Additional ports to open for Alertmanager service
    additionalPorts: []
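    ## For example (hypothetical port name and number), an extra named port to expose on the service:
    # additionalPorts:
    # - name: oauth-proxy
    #   port: 8081
    #   targetPort: 8081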

    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    ## Service type
    ##
    type: ClusterIP

  ## If true, create a serviceMonitor for alertmanager
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""

    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
    scheme: ""

    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    tlsConfig: {}

    bearerTokenFile:

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  ## Settings affecting alertmanagerSpec
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
  ##
  alertmanagerSpec:
    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
    ##
    podMetadata: {}

    ## Image of Alertmanager
    ##
    image:
      repository: rancher/mirrored-prometheus-alertmanager
      tag: v0.22.2
      sha: ""

    ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
    ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
    ##
    secrets: []

    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
    ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
    ##
    configMaps: []

    ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
    ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
    ##
    # configSecret:

    ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
    ##
    alertmanagerConfigSelector:
      # default ignores resources created by Rancher Monitoring
      matchExpressions:
      - key: release
        operator: NotIn
        values:
        - rancher-monitoring

    ## Define Log Format
    # Use logfmt (default) or json logging
    logFormat: logfmt

    ## Log level for Alertmanager to be configured with.
    ##
    logLevel: info

    ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
    ## running cluster equal to the expected size.
    replicas: 1

    ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
    ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
    ##
    retention: 120h

    ## Storage is the definition of how storage will be used by the Alertmanager instances.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
    ##
    storage: {}
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}

    ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name.
    ##
    externalUrl:

    ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
    ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
    ##
    routePrefix: /

    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
    ##
    paused: false

    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}

    ## Define resources requests and limits for single Pods.
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      limits:
        memory: 500Mi
        cpu: 1000m
      requests:
        memory: 100Mi
        cpu: 100m

    ## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
    ##
    podAntiAffinity: ""

    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
    ##
    podAntiAffinityTopologyKey: kubernetes.io/hostname

    ## Assign custom affinity rules to the alertmanager instance
    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #     - matchExpressions:
    #       - key: kubernetes.io/e2e-az-name
    #         operator: In
    #         values:
    #         - e2e-az1
    #         - e2e-az2

    ## If specified, the pod's tolerations.
    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## If specified, the pod's topology spread constraints.
    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
    ##
    topologySpreadConstraints: []
    # - maxSkew: 1
    #   topologyKey: topology.kubernetes.io/zone
    #   whenUnsatisfiable: DoNotSchedule
    #   labelSelector:
    #     matchLabels:
    #       app: alertmanager

    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to a non-root user with uid 1000 and gid 2000.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    ##
    securityContext:
      runAsGroup: 2000
      runAsNonRoot: true
      runAsUser: 1000
      fsGroup: 2000

    ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
    ## Note this is only for the Alertmanager UI, not the gossip communication.
    ##
    listenLocal: false

    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
    ##
    containers: []

    # Additional volumes on the output StatefulSet definition.
    volumes: []

    # Additional VolumeMounts on the output StatefulSet definition.
    volumeMounts: []

    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting prometheus
    initContainers: []

    ## Priority class assigned to the Pods
    ##
    priorityClassName: ""

    ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
    ##
    additionalPeers: []
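    ## For example (hypothetical peer address), to join an Alertmanager running elsewhere:
    # additionalPeers:
    # - alertmanager.other-cluster.example.com:9094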

    ## PortName to use for Alert Manager.
    ##
    portName: "web"

    ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
    ##
    clusterAdvertiseAddress: false

    ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
    ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
    forceEnableClusterMode: false

  ## ExtraSecret can be used to store various data in an extra secret
  ## (use it for example to store hashed basic auth credentials)
  extraSecret:
    ## if not set, name will be auto generated
    # name: ""
    annotations: {}
    data: {}
    # auth: |
    #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
    #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.

## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
##
grafana:
  enabled: true
  namespaceOverride: ""

  ## Grafana's primary configuration
  ## NOTE: values in map will be converted to ini format
  ## ref: http://docs.grafana.org/installation/configuration/
  ##
  grafana.ini:
    users:
      auto_assign_org_role: Viewer
    auth:
      disable_login_form: false
    auth.anonymous:
      enabled: true
      org_role: Viewer
    auth.basic:
      enabled: false
    security:
      # Required to embed dashboards in Rancher Cluster Overview Dashboard on Cluster Explorer
      allow_embedding: true
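    ## For example (hypothetical URL), additional ini sections can be added here and will be
    ## rendered into grafana.ini, e.g. to serve Grafana from a sub-path behind a proxy:
    # server:
    #   root_url: https://rancher.example.com/grafana/
    #   serve_from_sub_path: true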

  deploymentStrategy:
    type: Recreate

  ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
  ##
  forceDeployDatasources: false

  ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
  ##
  forceDeployDashboards: false

  ## Deploy default dashboards
  ##
  defaultDashboardsEnabled: true

  ## Timezone for the default dashboards
  ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
  ##
  defaultDashboardsTimezone: utc

  adminPassword: prom-operator

  ingress:
    ## If true, Grafana Ingress will be created
    ##
    enabled: false

    ## Annotations for Grafana Ingress
    ##
    annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"

    ## Labels to be added to the Ingress
    ##
    labels: {}

    ## Hostnames.
    ## Must be provided if Ingress is enabled.
    ##
    # hosts:
    # - grafana.domain.com
    hosts: []

    ## Path for grafana ingress
    path: /

    ## TLS configuration for grafana Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: grafana-general-tls
    #   hosts:
    #   - grafana.example.com

  sidecar:
    dashboards:
      enabled: true
      label: grafana_dashboard

      ## Annotations for Grafana dashboard configmaps
      ##
      annotations: {}
      provider:
        allowUiUpdates: false
    datasources:
      enabled: true
      defaultDatasourceEnabled: true

      ## URL of prometheus datasource
      ##
      # url: http://prometheus-stack-prometheus:9090/

      # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
      # defaultDatasourceScrapeInterval: 15s

      ## Annotations for Grafana datasource configmaps
      ##
      annotations: {}

      label: grafana_datasource

  extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/grafana/ssl/
  #   configMap: certs-configmap
  #   readOnly: true

  ## Configure additional grafana datasources (passed through tpl)
  ## ref: http://docs.grafana.org/administration/provisioning/#datasources
  additionalDataSources: []
  # - name: prometheus-sample
  #   access: proxy
  #   basicAuth: true
  #   basicAuthPassword: pass
  #   basicAuthUser: daco
  #   editable: false
  #   jsonData:
  #     tlsSkipVerify: true
  #   orgId: 1
  #   type: prometheus
  #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
  #   version: 1

  ## Passed to grafana subchart and used by servicemonitor below
  ##
  service:
    portName: nginx-http
    ## Port for Grafana Service to listen on
    ##
    port: 80
    ## To be used with a proxy extraContainer port
    ##
    targetPort: 8080
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30950
    ## Service type
    ##
    type: ClusterIP

  proxy:
    image:
      repository: rancher/mirrored-library-nginx
      tag: 1.21.1-alpine

  ## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
  extraContainers: |
    - name: grafana-proxy
      args:
      - nginx
      - -g
      - daemon off;
      - -c
      - /nginx/nginx.conf
      image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
      ports:
      - containerPort: 8080
        name: nginx-http
        protocol: TCP
      volumeMounts:
      - mountPath: /nginx
        name: grafana-nginx
      - mountPath: /var/cache/nginx
        name: nginx-home
      securityContext:
        runAsUser: 101
        runAsGroup: 101

  ## Volumes that can be used in containers
  extraContainerVolumes:
  - name: nginx-home
    emptyDir: {}
  - name: grafana-nginx
    configMap:
      name: grafana-nginx-proxy-config
      items:
      - key: nginx.conf
        mode: 438
        path: nginx.conf

  ## If true, create a serviceMonitor for grafana
  ##
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    # Path to use for scraping metrics. Might be different if server.root_url is set
    # in grafana.ini
    path: "/metrics"

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  resources:
    limits:
      memory: 200Mi
      cpu: 200m
    requests:
      memory: 100Mi
      cpu: 100m

  testFramework:
    enabled: false

## Deploy a Prometheus instance
##
prometheus:

  enabled: true

  ## Annotations for Prometheus
  ##
  annotations: {}

  ## Service account for Prometheuses to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""
    annotations: {}

  ## Configuration for Prometheus service
  ##
  service:
    annotations: {}
    labels: {}
    clusterIP: ""

    ## Port for Prometheus Service to listen on
    ##
    port: 9090

    ## To be used with a proxy extraContainer port
    targetPort: 8081

    ## List of IP addresses at which the Prometheus server service is available
    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
    ##
    externalIPs: []

    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30090

    ## Loadbalancer IP
    ## Only use if service.type is "LoadBalancer"
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    ## Service type
    ##
    type: ClusterIP

    sessionAffinity: ""

  ## Configure pod disruption budgets for Prometheus
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
  ## https://github.com/kubernetes/kubernetes/issues/45398
  ##
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable: ""

  ## ExtraSecret can be used to store various data in an extra secret
  ## (use it for example to store hashed basic auth credentials)
  extraSecret:
    ## if not set, name will be auto generated
    # name: ""
    annotations: {}
    data: {}
    # auth: |
    #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
    #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.

  ingress:
    enabled: false

    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
    # ingressClassName: nginx

    annotations: {}
    labels: {}

    ## Hostnames.
    ## Must be provided if Ingress is enabled.
    ##
    # hosts:
    # - prometheus.domain.com
    hosts: []

    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
    ##
    paths: []
    # - /

    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
    # pathType: ImplementationSpecific

    ## TLS configuration for Prometheus Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: prometheus-general-tls
    #   hosts:
    #   - prometheus.example.com

  ## Configure additional options for default pod security policy for Prometheus
  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
  podSecurityPolicy:
    allowedCapabilities: []
    volumes: []

  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    selfMonitor: true

    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
    scheme: ""

    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    tlsConfig: {}

    bearerTokenFile:

    ## Metric relabel configs to apply to samples before ingestion.
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## Relabel configs to apply to samples before ingestion.
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

  ## Settings affecting prometheusSpec
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
  ##
  prometheusSpec:
    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
    ##
    disableCompaction: false
    ## APIServerConfig
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig
    ##
    apiserverConfig: {}

    ## Interval between consecutive scrapes.
    ## Defaults to 30s.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
    ##
    scrapeInterval: ""

    ## Number of seconds to wait for target to respond before erroring
    ##
    scrapeTimeout: ""

    ## Interval between consecutive evaluations.
    ##
    evaluationInterval: ""

    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
    ##
    listenLocal: false

    ## EnableAdminAPI enables Prometheus's administrative HTTP API, which includes functionality such as deleting time series.
    ## This is disabled by default.
    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
    ##
    enableAdminAPI: false

    ## WebTLSConfig defines the TLS parameters for HTTPS
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#webtlsconfig
    web: {}

    # EnableFeatures enables access to Prometheus disabled features.
    # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
    enableFeatures: []
    # - exemplar-storage

    ## Image of Prometheus.
    ##
    image:
      repository: rancher/mirrored-prometheus-prometheus
      tag: v2.28.1
      sha: ""

    ## Tolerations for use with node taints
    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## If specified, the pod's topology spread constraints.
    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
    ##
    topologySpreadConstraints: []
    # - maxSkew: 1
    #   topologyKey: topology.kubernetes.io/zone
    #   whenUnsatisfiable: DoNotSchedule
    #   labelSelector:
    #     matchLabels:
    #       app: prometheus

    ## Alertmanagers to which alerts will be sent
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
    ##
    ## Default configuration will connect to the alertmanager deployed as part of this release
    ##
    alertingEndpoints: []
    # - name: ""
    #   namespace: ""
    #   port: http
    #   scheme: http
    #   pathPrefix: ""
    #   tlsConfig: {}
    #   bearerTokenFile: ""
    #   apiVersion: v2

    ## External labels to add to any time series or alerts when communicating with external systems
    ##
    externalLabels: {}
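    ## For example (hypothetical label values):
    # externalLabels:
    #   cluster: my-cluster
    #   project: my-project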

    ## Name of the external label used to denote Prometheus instance name
    ##
    prometheusExternalLabelName: ""

    ## If true, the Operator won't add the external label used to denote Prometheus instance name
    ##
    prometheusExternalLabelNameClear: false

    ## External URL at which Prometheus will be reachable.
    ##
    externalUrl: ""

    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}

    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
    ## with the new list of secrets.
    ##
    secrets: []

    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
    ##
    configMaps: []

    ## QuerySpec defines the query command line flags when starting Prometheus.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec
    ##
    query: {}

    ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the PrometheusRule resources created
    ##
    ruleSelectorNilUsesHelmValues: false

    ## PrometheusRules to be selected for target discovery.
    ## If {}, select all PrometheusRules
    ##
    ruleSelector:
      # default ignores resources created by Rancher Monitoring
      matchExpressions:
      - key: release
        operator: NotIn
        values:
        - rancher-monitoring

    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the servicemonitors created
    ##
    serviceMonitorSelectorNilUsesHelmValues: false

    ## ServiceMonitors to be selected for target discovery.
    ## If {}, select all ServiceMonitors
    ##
    serviceMonitorSelector:
      # default ignores resources created by Rancher Monitoring
      matchExpressions:
      - key: release
        operator: NotIn
        values:
        - rancher-monitoring

    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the podmonitors created
    ##
    podMonitorSelectorNilUsesHelmValues: false

    ## PodMonitors to be selected for target discovery.
    ## If {}, select all PodMonitors
    ##
    podMonitorSelector:
      # default ignores resources created by Rancher Monitoring
      matchExpressions:
      - key: release
        operator: NotIn
        values:
        - rancher-monitoring

    ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the probes created
    ##
    probeSelectorNilUsesHelmValues: true

    ## Probes to be selected for target discovery.
    ## If {}, select all Probes
    ##
    probeSelector:
      # default ignores resources created by Rancher Monitoring
      matchExpressions:
      - key: release
        operator: NotIn
        values:
        - rancher-monitoring

    ## How long to retain metrics
    ##
    retention: 10d

    ## Maximum size of metrics
    ##
    retentionSize: ""

    ## Enable compression of the write-ahead log using Snappy.
    ##
    walCompression: false

    ## If true, the Operator won't process any Prometheus configuration changes
    ##
    paused: false

    ## Number of replicas of each shard to deploy for a Prometheus deployment.
    ## Number of replicas multiplied by shards is the total number of Pods created.
    ##
    replicas: 1

    ## EXPERIMENTAL: Number of shards to distribute targets onto.
    ## Number of replicas multiplied by shards is the total number of Pods created.
    ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
    ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
    ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
    ## Sharding is done on the content of the `__address__` target meta-label.
    ##
    shards: 1

    ## Log level for Prometheus to be configured with.
    ##
    logLevel: info

    ## Log format for Prometheus to be configured with.
    ##
    logFormat: logfmt

    ## Prefix used to register routes, overriding externalUrl route.
    ## Useful for proxies that rewrite URLs.
    ##
    routePrefix: /

    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the prometheus pods.
    ##
    podMetadata: {}
    # labels:
    #   app: prometheus
    #   k8s-app: prometheus

    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
    podAntiAffinity: ""

    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
    ##
    podAntiAffinityTopologyKey: kubernetes.io/hostname

    ## Assign custom affinity rules to the prometheus instance
    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #     - matchExpressions:
    #       - key: kubernetes.io/e2e-az-name
    #         operator: In
    #         values:
    #         - e2e-az1
    #         - e2e-az2

    ## Resource limits & requests
    ##
    resources:
      limits:
        memory: 3000Mi
        cpu: 1000m
      requests:
        memory: 750Mi
        cpu: 750m

    ## Prometheus StorageSpec for persistent data
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
    ##
    storageSpec: {}
    ## Using PersistentVolumeClaim
    ##
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}

    ## Using tmpfs volume
    ##
    # emptyDir:
    #   medium: Memory

    # Additional volumes on the output StatefulSet definition.
    volumes:
    - name: nginx-home
      emptyDir: {}
    - name: prometheus-nginx
      configMap:
        name: prometheus-nginx-proxy-config
        defaultMode: 438

    # Additional VolumeMounts on the output StatefulSet definition.
    volumeMounts: []

    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to a non-root user with uid 1000 and gid 2000.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md
    ##
    securityContext:
      runAsGroup: 2000
      runAsNonRoot: true
      runAsUser: 1000
      fsGroup: 2000

    ## Priority class assigned to the Pods
    ##
    priorityClassName: ""

    proxy:
      image:
        repository: rancher/mirrored-library-nginx
        tag: 1.21.1-alpine

    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
    ## If using a proxy extraContainer, update targetPort with the proxy container port
    containers: |
      - name: prometheus-proxy
        args:
        - nginx
        - -g
        - daemon off;
        - -c
        - /nginx/nginx.conf
        image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}"
        ports:
        - containerPort: 8081
          name: nginx-http
          protocol: TCP
        volumeMounts:
        - mountPath: /nginx
          name: prometheus-nginx
        - mountPath: /var/cache/nginx
          name: nginx-home
        securityContext:
          runAsUser: 101
          runAsGroup: 101

    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting prometheus
    initContainers: []

    ## PortName to use for Prometheus.
    ##
    portName: "nginx-http"

    ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
    ## on the file system of the Prometheus container e.g. bearer token files.
    arbitraryFSAccessThroughSMs: false

    ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
    ## or PodMonitor to true, this overrides honor_labels to false.
    overrideHonorLabels: false

    ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
    overrideHonorTimestamps: false

    ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
    ## The label value will always be the namespace of the object that is being created.
    ## Disabled by default
    enforcedNamespaceLabel: ""

    ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
    ## Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are set for each pair
    prometheusRulesExcludedFromEnforce: []
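    ## For example (hypothetical rule name), to exempt a specific PrometheusRule from namespace-label enforcement:
    # prometheusRulesExcludedFromEnforce:
    # - ruleNamespace: cattle-monitoring-system
    #   ruleName: my-custom-rules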

    ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
    ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
    ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
    ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
    queryLogFile: false

    ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
    ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
    ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
    enforcedSampleLimit: false

    ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
    ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
    ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
    ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
    enforcedTargetLimit: false

    ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels are present
    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
    ## 2.27.0 and newer.
    enforcedLabelLimit: false

    ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
    ## 2.27.0 and newer.
    enforcedLabelNameLengthLimit: false

    ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
    ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
    ## versions 2.27.0 and newer.
    enforcedLabelValueLengthLimit: false

    ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
    ## in Prometheus so it may change in any upcoming release.
    allowOverlappingBlocks: false