diff --git a/packages/rancher-monitoring/overlay/CHANGELOG.md b/packages/rancher-monitoring/overlay/CHANGELOG.md index 5b0bc9f11..6fd7672f6 100644 --- a/packages/rancher-monitoring/overlay/CHANGELOG.md +++ b/packages/rancher-monitoring/overlay/CHANGELOG.md @@ -17,6 +17,7 @@ All notable changes from the upstream Prometheus Operator chart will be added to - Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter` - Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`. - Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart. +- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. The purpose of this container is to make it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is also where the Grafana service will now point to, and will forward all requests to the Grafana container listening on the default port `3000`. 
### Modified - Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml` - Modified the default `node-exporter` port from `9100` to `9796` diff --git a/packages/rancher-monitoring/overlay/charts/grafana/templates/nginx-config.yaml b/packages/rancher-monitoring/overlay/charts/grafana/templates/nginx-config.yaml new file mode 100644 index 000000000..f847c51ce --- /dev/null +++ b/packages/rancher-monitoring/overlay/charts/grafana/templates/nginx-config.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-nginx-proxy-config + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + nginx.conf: |- + worker_processes auto; + error_log /dev/stdout warn; + pid /var/cache/nginx/nginx.pid; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)'; + + proxy_connect_timeout 10; + proxy_read_timeout 180; + proxy_send_timeout 5; + proxy_buffering off; + proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g; + + server { + listen 8080; + access_log off; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 2; + gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + proxy_set_header Host $host; + + location /api/dashboards { + proxy_pass http://localhost:3000; + } + + location /api/search { + proxy_pass http://localhost:3000; + + sub_filter_types application/json; + sub_filter_once off; + sub_filter '"url":"/d' '"url":"d'; + } + + location / { + proxy_cache my_zone; + proxy_cache_valid 200 302 1d; + proxy_cache_valid 301 30d; + proxy_cache_valid any 5m; + proxy_cache_bypass $http_cache_control; + 
add_header X-Proxy-Cache $upstream_cache_status; + add_header Cache-Control "public"; + + proxy_pass http://localhost:3000/; + + sub_filter_types text/html; + sub_filter_once off; + sub_filter '"appSubUrl":""' '"appSubUrl":"."'; + sub_filter '"url":"/' '"url":"./'; + sub_filter ':"/avatar/' ':"avatar/'; + + if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) { + expires 90d; + } + } + } + } diff --git a/packages/rancher-monitoring/rancher-monitoring.patch b/packages/rancher-monitoring/rancher-monitoring.patch index f3ac4655b..92f1286b1 100644 --- a/packages/rancher-monitoring/rancher-monitoring.patch +++ b/packages/rancher-monitoring/rancher-monitoring.patch @@ -193,6 +193,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} {{- if .Values.command }} +@@ -285,7 +284,7 @@ + {{- end }} + ports: + - name: {{ .Values.service.portName }} +- containerPort: {{ .Values.service.port }} ++ containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/grafana/values.yaml packages/rancher-monitoring/charts/charts/grafana/values.yaml --- packages/rancher-monitoring/charts-original/charts/grafana/values.yaml +++ packages/rancher-monitoring/charts/charts/grafana/values.yaml @@ -2273,16 +2282,18 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Annotations for Grafana dashboard configmaps ## -@@ -575,6 +942,19 @@ +@@ -574,7 +935,60 @@ + ## Passed to grafana subchart and used by servicemonitor below ## service: - portName: service +- portName: service ++ portName: nginx-http + ## Port for Grafana Service to listen on + ## + port: 80 + ## To be used with a proxy extraContainer port + ## -+ targetPort: 3000 ++ targetPort: 8080 + ## Port to expose on each node + ## Only 
used if service.type is 'NodePort' + ## @@ -2290,10 +2301,50 @@ + ## Service type + ## + type: ClusterIP ++ ++ proxy: ++ image: ++ repository: rancher/library-nginx ++ tag: 1.19.2-alpine ++ ++ ## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod ++ extraContainers: | ++ - name: grafana-proxy ++ args: ++ - nginx ++ - -g ++ - daemon off; ++ - -c ++ - /nginx/nginx.conf ++ image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}" ++ ports: ++ - containerPort: 8080 ++ name: nginx-http ++ protocol: TCP ++ volumeMounts: ++ - mountPath: /nginx ++ name: grafana-nginx ++ - mountPath: /var/cache/nginx ++ name: nginx-home ++ securityContext: ++ runAsUser: 101 ++ runAsGroup: 101 ++ ++ ## Volumes that can be used in containers ++ extraContainerVolumes: ++ - name: nginx-home ++ emptyDir: {} ++ - name: grafana-nginx ++ configMap: ++ name: grafana-nginx-proxy-config ++ items: ++ - key: nginx.conf ++ mode: 438 ++ path: nginx.conf ## If true, create a serviceMonitor for grafana ## -@@ -600,6 +980,14 @@ +@@ -600,6 +1014,14 @@ # targetLabel: nodename # replacement: $1 # action: replace @@ -2308,7 +2359,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Component scraping the kube api server ## -@@ -756,7 +1144,7 @@ +@@ -756,7 +1178,7 @@ ## Component scraping the kube controller manager ## kubeControllerManager: @@ -2317,7 +2368,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on ## -@@ -889,7 +1277,7 @@ +@@ -889,7 +1311,7 @@ ## Component scraping etcd ## kubeEtcd: @@ -2326,7 +2377,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## If your etcd is not deployed as a pod, specify IPs 
it can be found on ## -@@ -949,7 +1337,7 @@ +@@ -949,7 +1371,7 @@ ## Component scraping kube scheduler ## kubeScheduler: @@ -2335,7 +2386,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on ## -@@ -1002,7 +1390,7 @@ +@@ -1002,7 +1424,7 @@ ## Component scraping kube proxy ## kubeProxy: @@ -2344,7 +2395,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## If your kube proxy is not deployed as a pod, specify IPs it can be found on ## -@@ -1076,6 +1464,13 @@ +@@ -1076,6 +1498,13 @@ create: true podSecurityPolicy: enabled: true @@ -2358,7 +2409,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Deploy node exporter as a daemonset to all nodes ## -@@ -1125,6 +1520,16 @@ +@@ -1125,6 +1554,16 @@ extraArgs: - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ @@ -2375,7 +2426,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Manages Prometheus and Alertmanager components ## -@@ -1138,7 +1543,7 @@ +@@ -1138,7 +1577,7 @@ tlsProxy: enabled: true image: @@ -2384,7 +2435,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val tag: v1.5.2 sha: "" pullPolicy: IfNotPresent -@@ -1156,7 +1561,7 @@ +@@ -1156,7 +1595,7 @@ patch: enabled: true image: @@ -2393,7 +2444,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val tag: v1.2.1 sha: "" pullPolicy: IfNotPresent -@@ -1285,13 +1690,13 @@ +@@ -1285,13 +1724,13 @@ ## Resource limits & requests ## @@ -2414,7 +2465,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val # Required for use in managed 
kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working -@@ -1335,7 +1740,7 @@ +@@ -1335,7 +1774,7 @@ ## Prometheus-operator image ## image: @@ -2423,7 +2474,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val tag: v0.38.1 sha: "" pullPolicy: IfNotPresent -@@ -1343,14 +1748,14 @@ +@@ -1343,14 +1782,14 @@ ## Configmap-reload image to use for reloading configmaps ## configmapReloadImage: @@ -2440,7 +2491,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val tag: v0.38.1 sha: "" -@@ -1366,14 +1771,6 @@ +@@ -1366,14 +1805,6 @@ ## secretFieldSelector: "" @@ -2455,7 +2506,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Deploy a Prometheus instance ## prometheus: -@@ -1614,7 +2011,7 @@ +@@ -1614,7 +2045,7 @@ ## Image of Prometheus. ## image: @@ -2464,7 +2515,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val tag: v2.18.2 sha: "" -@@ -1666,6 +2063,11 @@ +@@ -1666,6 +2097,11 @@ ## externalUrl: "" @@ -2476,7 +2527,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## Define which Nodes the Pods are scheduled on. ## ref: https://kubernetes.io/docs/user-guide/node-selection/ ## -@@ -1698,7 +2100,7 @@ +@@ -1698,7 +2134,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the PrometheusRule resources created ## @@ -2485,7 +2536,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## PrometheusRules to be selected for target discovery. 
## If {}, select all ServiceMonitors -@@ -1723,7 +2125,7 @@ +@@ -1723,7 +2159,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the servicemonitors created ## @@ -2494,7 +2545,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## ServiceMonitors to be selected for target discovery. ## If {}, select all ServiceMonitors -@@ -1743,7 +2145,7 @@ +@@ -1743,7 +2179,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the podmonitors created ## @@ -2503,7 +2554,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val ## PodMonitors to be selected for target discovery. ## If {}, select all PodMonitors -@@ -1840,9 +2242,13 @@ +@@ -1840,9 +2276,13 @@ ## Resource limits & requests ##