mirror of https://git.rancher.io/charts
(dev-v2.6-archive) add sub-charts for scraping ingress-nginx in rke and rke2 clusters
(partially cherry picked from commit 1157b4a153)
pull/1680/head
parent e45348b081
commit ed9fa241d7

@ -0,0 +1,2 @@
workingDir: ""
url: packages/rancher-pushprox

@ -0,0 +1,2 @@
workingDir: ""
url: packages/rancher-pushprox

@ -1,12 +1,12 @@
# Source:
{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.ingressNginx.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
namespace: {{ .Values.grafana.defaultDashboards.namespace }}
name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "ingress-nginx" | trunc 63 | trimSuffix "-" }}
annotations:
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
{{- if .Values.grafana.sidecar.dashboards.annotations }}
annotations: {{ toYaml .Values.grafana.sidecar.dashboards.annotations | nindent 4 }}
{{- end }}
labels:
{{- if $.Values.grafana.sidecar.dashboards.label }}
{{ $.Values.grafana.sidecar.dashboards.label }}: "1"
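For orientation, the hunk above moves the dashboard ConfigMap's annotations under a conditional, so the key is only emitted when grafana.sidecar.dashboards.annotations is non-empty. A rough sketch of the rendered metadata, assuming the chart fullname resolves to rancher-monitoring, the dashboard namespace is cattle-dashboards, and a single annotation is set in values (all three names are illustrative assumptions, not taken from the diff):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: cattle-dashboards              # assumed value of grafana.defaultDashboards.namespace
  name: rancher-monitoring-ingress-nginx    # "<fullname>-ingress-nginx", truncated to 63 characters
  annotations:                              # emitted only because the annotations map is non-empty
    example.com/owner: monitoring           # hypothetical entry from grafana.sidecar.dashboards.annotations
  labels:
    grafana_dashboard: "1"                  # grafana.sidecar.dashboards.label; the sidecar watches for this label
```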

@ -1,4 +1,7 @@
{{- if .Values.ingressNginx.enabled }}
{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }}
{{- fail "Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }}
{{- end }}
{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }}
apiVersion: v1
kind: Service
metadata:

@ -1,4 +1,7 @@
{{- if .Values.ingressNginx.enabled }}
{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }}
{{- fail "Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }}
{{- end }}
{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
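Both the Service and the ServiceMonitor template get the same pair of conditionals, so the plain ingress-nginx scrape and the PushProx-based one are mutually exclusive. Read literally, the guards imply the following combinations (this summary is an editorial reading of the hunks above, not part of the diff):

```yaml
# ingressNginx.enabled | rkeIngressNginx.enabled | result
# ---------------------+-------------------------+------------------------------------------------
# true                 | false                   | Service and ServiceMonitor rendered as before
# true                 | true                    | Service and ServiceMonitor skipped; the rkeIngressNginx PushProx sub-chart scrapes instead
# false                | true                    | rendering fails with the message in the fail call
# false                | false                   | nothing is rendered for ingress-nginx
```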

@ -16,7 +16,7 @@
apiVersion: v2
appVersion: 0.46.0
dependencies:
@@ -65,19 +74,17 @@
@@ -71,19 +80,17 @@
- condition: rkeScheduler.enabled
name: rkeScheduler
repository: file://./charts/rkeScheduler

@ -39,7 +39,7 @@
kubeVersion: '>=1.16.0-0'
maintainers:
- name: vsliouniaev
@@ -90,9 +97,12 @@
@@ -96,9 +103,12 @@
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro

@ -1,6 +1,6 @@
--- charts-original/README.md
+++ charts/README.md
@@ -171,7 +171,41 @@
@@ -171,7 +171,43 @@
helm show values prometheus-community/kube-prometheus-stack
```

@ -33,7 +33,9 @@
+| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` |
+| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` |
+| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` |
+| `rkeIngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE clusters | `false` |
+| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` |
+| `rke2IngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE2 clusters | `false` |
+| `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` |
+| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` |
+| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` |
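The rows above document the new PushProx toggles added to the chart README. As a hedged illustration of how an RKE cluster might turn the ingress-nginx exporter on (the surrounding values layout is assumed; only the flag names and defaults come from the table):

```yaml
# values override for an RKE downstream cluster (illustrative)
ingressNginx:
  enabled: true        # must stay enabled, per the guard in the exporter templates above
rkeIngressNginx:
  enabled: true        # deploys the PushProx client/proxy pair that forwards the controller's metrics on port 10254
```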

@ -1,6 +1,6 @@
--- charts-original/values.yaml
+++ charts/values.yaml
@@ -2,13 +2,375 @@
@@ -2,13 +2,423 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

@ -84,6 +84,21 @@
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rkeIngressNginx:
+ enabled: false
+ metricsPort: 10254
+ component: ingress-nginx
+ clients:
+ port: 10015
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ nodeSelector:
+ node-role.kubernetes.io/worker: "true"
+
+## k3s PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##

@ -260,6 +275,39 @@
+ - effect: "NoSchedule"
+ operator: "Exists"
+
+rke2IngressNginx:
+ enabled: false
+ metricsPort: 10254
+ component: ingress-nginx
+ clients:
+ port: 10015
+ useLocalhost: true
+ tolerations:
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: "app.kubernetes.io/component"
+ operator: "In"
+ values:
+ - "controller"
+ topologyKey: "kubernetes.io/hostname"
+ namespaces:
+ - "kube-system"
+ # in the RKE2 cluster, the ingress-nginx-controller is deployed as
+ # a Deployment with 1 pod when RKE2 version is <= 1.20,
+ # a DaemonSet when RKE2 version is >= 1.21
+ deployment:
+ enabled: false
+ replicas: 1
+
+
+
+## Additional PushProx Monitoring
+## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
+##
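The comment inside the rke2IngressNginx block above notes that RKE2 ran ingress-nginx-controller as a single-replica Deployment up to v1.20 and as a DaemonSet from v1.21. On an older RKE2 cluster the PushProx clients would presumably be switched to Deployment mode as well, so a single client lands next to the lone controller pod via the podAffinity shown above. A hypothetical override for RKE2 <= 1.20; nesting deployment under clients mirrors the block above but is an assumption, since the mirror flattens the YAML indentation:

```yaml
rke2IngressNginx:
  enabled: true
  clients:
    # run the PushProx client as a one-replica Deployment co-scheduled with the
    # single ingress-nginx-controller pod, instead of as a DaemonSet on every node
    deployment:
      enabled: true
      replicas: 1
```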

@ -378,7 +426,7 @@
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
##
@@ -89,8 +451,32 @@
@@ -89,8 +499,32 @@
##
global:

@ -411,7 +459,7 @@
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
@@ -143,6 +529,22 @@
@@ -143,6 +577,22 @@
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
## https://prometheus.io/webtools/alerting/routing-tree-editor/
##

@ -434,7 +482,7 @@
config:
global:
resolve_timeout: 5m
@@ -179,25 +581,76 @@
@@ -179,25 +629,76 @@
## ref: https://prometheus.io/docs/alerting/notifications/
## https://prometheus.io/docs/alerting/notification_examples/
##

@ -530,7 +578,7 @@
ingress:
enabled: false
@@ -235,6 +688,25 @@
@@ -235,6 +736,25 @@
## Configuration for Alertmanager secret
##
secret:

@ -556,7 +604,7 @@
annotations: {}
## Configuration for creating an Ingress that will map to each Alertmanager replica service
@@ -352,7 +824,7 @@
@@ -352,7 +872,7 @@
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
tlsConfig: {}

@ -565,7 +613,7 @@
## metric relabel configs to apply to samples before ingestion.
##
@@ -383,7 +855,7 @@
@@ -383,7 +903,7 @@
## Image of Alertmanager
##
image:

@ -574,7 +622,7 @@
tag: v0.21.0
sha: ""
@@ -495,9 +967,13 @@
@@ -495,9 +1015,13 @@
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##

@ -591,7 +639,7 @@
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
@@ -601,10 +1077,46 @@
@@ -601,10 +1125,46 @@
enabled: true
namespaceOverride: ""

@ -638,7 +686,7 @@
adminPassword: prom-operator
ingress:
@@ -644,6 +1156,7 @@
@@ -644,6 +1204,7 @@
dashboards:
enabled: true
label: grafana_dashboard

@ -646,7 +694,7 @@
## Annotations for Grafana dashboard configmaps
##
@@ -692,7 +1205,60 @@
@@ -692,7 +1253,60 @@
## Passed to grafana subchart and used by servicemonitor below
##
service:

@ -708,7 +756,7 @@
## If true, create a serviceMonitor for grafana
##
@@ -722,6 +1288,14 @@
@@ -722,6 +1336,14 @@
# targetLabel: nodename
# replacement: $1
# action: replace

@ -723,7 +771,7 @@
## Component scraping the kube api server
##
@@ -879,7 +1453,7 @@
@@ -879,7 +1501,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:

@ -732,7 +780,7 @@
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -1014,7 +1588,7 @@
@@ -1014,7 +1636,7 @@
## Component scraping etcd
##
kubeEtcd:

@ -741,7 +789,7 @@
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -1076,7 +1650,7 @@
@@ -1076,7 +1698,7 @@
## Component scraping kube scheduler
##
kubeScheduler:

@ -750,7 +798,7 @@
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -1131,7 +1705,7 @@
@@ -1131,7 +1753,7 @@
## Component scraping kube proxy
##
kubeProxy:

@ -759,7 +807,7 @@
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1210,6 +1784,13 @@
@@ -1210,6 +1832,13 @@
create: true
podSecurityPolicy:
enabled: true

@ -773,7 +821,7 @@
## Deploy node exporter as a daemonset to all nodes
##
@@ -1259,6 +1840,16 @@
@@ -1259,6 +1888,16 @@
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$

@ -790,7 +838,7 @@
## Manages Prometheus and Alertmanager components
##
@@ -1271,8 +1862,8 @@
@@ -1271,8 +1910,8 @@
enabled: true
# Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
tlsMinVersion: VersionTLS13

@ -801,7 +849,7 @@
## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
## rules from making their way into prometheus and potentially preventing the container from starting
@@ -1289,7 +1880,7 @@
@@ -1289,7 +1928,7 @@
patch:
enabled: true
image:

@ -810,7 +858,7 @@
tag: v1.5.0
sha: ""
pullPolicy: IfNotPresent
@@ -1428,13 +2019,13 @@
@@ -1428,13 +2067,13 @@
## Resource limits & requests
##

@ -831,7 +879,7 @@
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1487,7 +2078,7 @@
@@ -1487,7 +2126,7 @@
## Prometheus-operator image
##
image:

@ -840,7 +888,7 @@
tag: v0.46.0
sha: ""
pullPolicy: IfNotPresent
@@ -1503,7 +2094,7 @@
@@ -1503,7 +2142,7 @@
## Prometheus-config-reloader image to use for config and rule reloading
##
prometheusConfigReloaderImage:

@ -849,7 +897,7 @@
tag: v0.46.0
sha: ""
@@ -1558,6 +2149,14 @@
@@ -1558,6 +2197,14 @@
##
nodePort: 30901

@ -864,7 +912,7 @@
## Configuration for Prometheus service
##
service:
@@ -1570,7 +2169,7 @@
@@ -1570,7 +2217,7 @@
port: 9090
## To be used with a proxy extraContainer port

@ -873,7 +921,7 @@
## List of IP addresses at which the Prometheus server service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
@@ -1822,7 +2421,7 @@
@@ -1822,7 +2469,7 @@
## Image of Prometheus.
##
image:

@ -882,7 +930,7 @@
tag: v2.24.0
sha: ""
@@ -1885,6 +2484,11 @@
@@ -1885,6 +2532,11 @@
##
externalUrl: ""

@ -894,7 +942,7 @@
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
@@ -1917,7 +2521,7 @@
@@ -1917,7 +2569,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##

@ -903,7 +951,7 @@
## PrometheusRules to be selected for target discovery.
## If {}, select all PrometheusRules
@@ -1942,7 +2546,7 @@
@@ -1942,7 +2594,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##

@ -912,7 +960,7 @@
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1965,7 +2569,7 @@
@@ -1965,7 +2617,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##

@ -921,7 +969,7 @@
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
@@ -2092,9 +2696,13 @@
@@ -2092,9 +2744,13 @@
## Resource limits & requests
##

@ -938,7 +986,7 @@
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
@@ -2117,7 +2725,13 @@
@@ -2117,7 +2773,13 @@
# medium: Memory
# Additional volumes on the output StatefulSet definition.

@ -953,7 +1001,7 @@
# Additional VolumeMounts on the output StatefulSet definition.
volumeMounts: []
@@ -2224,9 +2838,34 @@
@@ -2224,9 +2886,34 @@
##
thanos: {}

@ -989,7 +1037,7 @@
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
## (permissions, dir tree) on mounted volumes before starting prometheus
@@ -2234,7 +2873,7 @@
@@ -2234,7 +2921,7 @@
## PortName to use for Prometheus.
##