Merge pull request #677 from aiyengar2/fix_magic_links

Fix broken magic links for Grafana and Prometheus
aiyengar2 2020-09-22 14:56:33 -07:00 committed by GitHub
commit f7cee8c5c7
6 changed files with 307 additions and 22 deletions

View File

@@ -17,6 +17,8 @@ All notable changes from the upstream Prometheus Operator chart will be added to
- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter`
- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`.
- Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart.
- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. This container makes it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). It listens on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is the port the Grafana Service now targets, and forwards all requests to the Grafana container listening on the default port `3000`.
- Added a default `nginx` proxy container deployed with Prometheus whose config is set in the `ConfigMap` located in `templates/prometheus/nginx-config.yaml`. This container makes it possible to view Prometheus's UI through a proxy that has a subpath (e.g. Rancher's proxy). It listens on port `8080` (with a `portName` of `nginx-http` instead of the default `web`), which is the port the Prometheus Service now targets, and forwards all requests to the Prometheus container listening on the default port `9090`.
### Modified
- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml`
- Modified the default `node-exporter` port from `9100` to `9796`
@@ -39,3 +41,4 @@ All notable changes from the upstream Prometheus Operator chart will be added to
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the chart to create the Alertmanager Config Secret via a pre-install hook instead of managing it through the normal Helm lifecycle. The benefit of this approach is that changes made to the Config Secret on a live cluster are never overridden on a `helm upgrade`, since the secret is only created on `helm install` and never modified on upgrade. If you would like the secret to be cleaned up on `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; this is disabled by default to prevent the loss of alerting configuration on an uninstall.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": true, "runAsUser": 1000}` and set `grafana.rbac.pspUseAppArmor=false` in order to make it possible to deploy this chart on a hardened cluster without AppArmor installed.
- Modified `.Values.prometheus.prometheusSpec.containers` to take in a string representing a template that should be rendered by Helm (via `tpl`) instead of allowing a user to provide YAML directly.
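As a minimal sketch of how `global.cattle.systemDefaultRegistry` is consumed (the registry hostname below is illustrative, and it assumes the chart's `system_default_registry` helper prepends `<registry>/` only when the value is set):

# values.yaml override (illustrative)
global:
  cattle:
    systemDefaultRegistry: "registry.example.com"

# An image reference written in the chart as
#   image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
# would then render roughly as
#   image: "registry.example.com/rancher/library-nginx:1.19.2-alpine"
# and as "rancher/library-nginx:1.19.2-alpine" when the value is left empty.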

View File

@@ -0,0 +1,75 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-nginx-proxy-config
  namespace: {{ template "grafana.namespace" . }}
  labels:
    {{- include "grafana.labels" . | nindent 4 }}
data:
  nginx.conf: |-
    worker_processes auto;
    error_log /dev/stdout warn;
    pid /var/cache/nginx/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)';
      proxy_connect_timeout 10;
      proxy_read_timeout 180;
      proxy_send_timeout 5;
      proxy_buffering off;
      proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g;
      server {
        listen 8080;
        access_log off;
        gzip on;
        gzip_min_length 1k;
        gzip_comp_level 2;
        gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png;
        gzip_vary on;
        gzip_disable "MSIE [1-6]\.";
        proxy_set_header Host $host;
        location /api/dashboards {
          proxy_pass http://localhost:3000;
        }
        location /api/search {
          proxy_pass http://localhost:3000;
          sub_filter_types application/json;
          sub_filter_once off;
          sub_filter '"url":"/d' '"url":"d';
        }
        location / {
          proxy_cache my_zone;
          proxy_cache_valid 200 302 1d;
          proxy_cache_valid 301 30d;
          proxy_cache_valid any 5m;
          proxy_cache_bypass $http_cache_control;
          add_header X-Proxy-Cache $upstream_cache_status;
          add_header Cache-Control "public";
          proxy_pass http://localhost:3000/;
          sub_filter_types text/html;
          sub_filter_once off;
          sub_filter '"appSubUrl":""' '"appSubUrl":"."';
          sub_filter '"url":"/' '"url":"./';
          sub_filter ':"/avatar/' ':"avatar/';
          if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
            expires 90d;
          }
        }
      }
    }
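To make the effect of the `sub_filter` rules above concrete, here is an illustrative before/after (the dashboard UID and the exact JSON emitted depend on the Grafana version):

# Emitted by Grafana on localhost:3000        # Rewritten by the proxy on :8080
"appSubUrl":""                                "appSubUrl":"."
"url":"/d/abc123/my-dashboard"                "url":"./d/abc123/my-dashboard"
# /api/search responses:
"url":"/d/abc123"                             "url":"d/abc123"

Because the rewritten URLs are relative, they resolve under whatever subpath the external proxy (e.g. Rancher's cluster proxy) serves Grafana from, which is what fixes the broken magic links.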

View File

@@ -0,0 +1,66 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-nginx-proxy-config
  namespace: {{ template "kube-prometheus-stack.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-prometheus
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.prometheus.annotations }}
  annotations:
{{ toYaml .Values.prometheus.annotations | indent 4 }}
{{- end }}
data:
  nginx.conf: |-
    worker_processes auto;
    error_log /dev/stdout warn;
    pid /var/cache/nginx/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)';
      proxy_connect_timeout 10;
      proxy_read_timeout 180;
      proxy_send_timeout 5;
      proxy_buffering off;
      proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g;
      server {
        listen 8080;
        access_log off;
        gzip on;
        gzip_min_length 1k;
        gzip_comp_level 2;
        gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png;
        gzip_vary on;
        gzip_disable "MSIE [1-6]\.";
        proxy_set_header Host $host;
        location / {
          proxy_cache my_zone;
          proxy_cache_valid 200 302 1d;
          proxy_cache_valid 301 30d;
          proxy_cache_valid any 5m;
          proxy_cache_bypass $http_cache_control;
          add_header X-Proxy-Cache $upstream_cache_status;
          add_header Cache-Control "public";
          proxy_pass http://localhost:9090/;
          sub_filter_types text/html;
          sub_filter_once off;
          sub_filter 'var PATH_PREFIX = "";' 'var PATH_PREFIX = ".";';
          if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
            expires 90d;
          }
        }
      }
    }
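The single `sub_filter` above does the same job for Prometheus's UI; a sketch of the rewrite (the surrounding `<script>` tag is illustrative of the Prometheus 2.x web UI, which builds its API and asset URLs from `PATH_PREFIX`):

# Served by Prometheus on localhost:9090
<script>var PATH_PREFIX = "";</script>
# Rewritten by the proxy on :8080
<script>var PATH_PREFIX = ".";</script>

With a relative prefix the UI fetches e.g. `./api/v1/query` instead of `/api/v1/query`, so requests stay under the external proxy's subpath.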

View File

@@ -193,6 +193,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/cha
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
@@ -285,7 +284,7 @@
{{- end }}
ports:
- name: {{ .Values.service.portName }}
- containerPort: {{ .Values.service.port }}
+ containerPort: {{ .Values.service.targetPort }}
protocol: TCP
- name: {{ .Values.podPortName }}
containerPort: 3000
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/charts/grafana/values.yaml packages/rancher-monitoring/charts/charts/grafana/values.yaml
--- packages/rancher-monitoring/charts-original/charts/grafana/values.yaml
+++ packages/rancher-monitoring/charts/charts/grafana/values.yaml
@@ -1465,6 +1474,15 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/tem
{{- if .Values.prometheus.prometheusSpec.nodeSelector }}
nodeSelector:
{{ toYaml .Values.prometheus.prometheusSpec.nodeSelector | indent 4 }}
@@ -226,7 +229,7 @@
{{- end }}
{{- if .Values.prometheus.prometheusSpec.containers }}
containers:
-{{ toYaml .Values.prometheus.prometheusSpec.containers | indent 4 }}
+{{ tpl .Values.prometheus.prometheusSpec.containers $ | indent 4 }}
{{- end }}
{{- if .Values.prometheus.prometheusSpec.initContainers }}
initContainers:
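A hedged sketch of what the `toYaml` to `tpl` switch means for anyone overriding `prometheus.prometheusSpec.containers` (the sidecar name and image below are illustrative): the value must now be a string (note the block scalar), and Helm template expressions inside it are expanded at render time, which is what lets the default proxy container reference `system_default_registry`.

# Before: a literal YAML list, rendered verbatim via toYaml
prometheus:
  prometheusSpec:
    containers:
      - name: my-auth-proxy
        image: registry.example.com/my-auth-proxy:v1

# After: a string passed through tpl, so template expressions are allowed
prometheus:
  prometheusSpec:
    containers: |
      - name: my-auth-proxy
        image: "{{ template "system_default_registry" . }}my-auth-proxy:v1"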
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/templates/prometheus/rules/etcd.yaml packages/rancher-monitoring/charts/templates/prometheus/rules/etcd.yaml
--- packages/rancher-monitoring/charts-original/templates/prometheus/rules/etcd.yaml
+++ packages/rancher-monitoring/charts/templates/prometheus/rules/etcd.yaml
@@ -2273,16 +2291,18 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Annotations for Grafana dashboard configmaps
##
@@ -575,6 +942,19 @@
@@ -574,7 +935,60 @@
## Passed to grafana subchart and used by servicemonitor below
##
service:
portName: service
- portName: service
+ portName: nginx-http
+ ## Port for Grafana Service to listen on
+ ##
+ port: 80
+ ## To be used with a proxy extraContainer port
+ ##
+ targetPort: 3000
+ targetPort: 8080
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
@@ -2290,10 +2310,50 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
+ ## Service type
+ ##
+ type: ClusterIP
+
+ proxy:
+ image:
+ repository: rancher/library-nginx
+ tag: 1.19.2-alpine
+
+ ## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a Grafana pod
+ extraContainers: |
+ - name: grafana-proxy
+ args:
+ - nginx
+ - -g
+ - daemon off;
+ - -c
+ - /nginx/nginx.conf
+ image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
+ ports:
+ - containerPort: 8080
+ name: nginx-http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /nginx
+ name: grafana-nginx
+ - mountPath: /var/cache/nginx
+ name: nginx-home
+ securityContext:
+ runAsUser: 101
+ runAsGroup: 101
+
+ ## Volumes that can be used in containers
+ extraContainerVolumes:
+ - name: nginx-home
+ emptyDir: {}
+ - name: grafana-nginx
+ configMap:
+ name: grafana-nginx-proxy-config
+ items:
+ - key: nginx.conf
+ mode: 438
+ path: nginx.conf
## If true, create a serviceMonitor for grafana
##
@@ -600,6 +980,14 @@
@@ -600,6 +1014,14 @@
# targetLabel: nodename
# replacement: $1
# action: replace
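Taken together, the Grafana values above wire the traffic path as follows (a sketch of the defaults shown in this diff):

# Grafana Service: port 80, portName "nginx-http"
#   -> pod targetPort 8080 = grafana-proxy (nginx) containerPort 8080
#        -> proxy_pass http://localhost:3000 = grafana container (default port 3000)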
@@ -2308,7 +2368,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Component scraping the kube api server
##
@@ -756,7 +1144,7 @@
@@ -756,7 +1178,7 @@
## Component scraping the kube controller manager
##
kubeControllerManager:
@@ -2317,7 +2377,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -889,7 +1277,7 @@
@@ -889,7 +1311,7 @@
## Component scraping etcd
##
kubeEtcd:
@@ -2326,7 +2386,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -949,7 +1337,7 @@
@@ -949,7 +1371,7 @@
## Component scraping kube scheduler
##
kubeScheduler:
@@ -2335,7 +2395,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -1002,7 +1390,7 @@
@@ -1002,7 +1424,7 @@
## Component scraping kube proxy
##
kubeProxy:
@@ -2344,7 +2404,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1076,6 +1464,13 @@
@@ -1076,6 +1498,13 @@
create: true
podSecurityPolicy:
enabled: true
@@ -2358,7 +2418,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Deploy node exporter as a daemonset to all nodes
##
@@ -1125,6 +1520,16 @@
@@ -1125,6 +1554,16 @@
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
@@ -2375,7 +2435,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Manages Prometheus and Alertmanager components
##
@@ -1138,7 +1543,7 @@
@@ -1138,7 +1577,7 @@
tlsProxy:
enabled: true
image:
@@ -2384,7 +2444,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v1.5.2
sha: ""
pullPolicy: IfNotPresent
@@ -1156,7 +1561,7 @@
@@ -1156,7 +1595,7 @@
patch:
enabled: true
image:
@@ -2393,7 +2453,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v1.2.1
sha: ""
pullPolicy: IfNotPresent
@@ -1285,13 +1690,13 @@
@@ -1285,13 +1724,13 @@
## Resource limits & requests
##
@@ -2414,7 +2474,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
@@ -1335,7 +1740,7 @@
@@ -1335,7 +1774,7 @@
## Prometheus-operator image
##
image:
@ -2423,7 +2483,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v0.38.1
sha: ""
pullPolicy: IfNotPresent
@@ -1343,14 +1748,14 @@
@@ -1343,14 +1782,14 @@
## Configmap-reload image to use for reloading configmaps
##
configmapReloadImage:
@@ -2440,7 +2500,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v0.38.1
sha: ""
@@ -1366,14 +1771,6 @@
@@ -1366,14 +1805,6 @@
##
secretFieldSelector: ""
@ -2455,7 +2515,16 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Deploy a Prometheus instance
##
prometheus:
@@ -1614,7 +2011,7 @@
@@ -1403,7 +1834,7 @@
port: 9090
## To be used with a proxy extraContainer port
- targetPort: 9090
+ targetPort: 8080
## List of IP addresses at which the Prometheus server service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
@@ -1614,7 +2045,7 @@
## Image of Prometheus.
##
image:
@@ -2464,7 +2533,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
tag: v2.18.2
sha: ""
@@ -1666,6 +2063,11 @@
@@ -1666,6 +2097,11 @@
##
externalUrl: ""
@@ -2476,7 +2545,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
@@ -1698,7 +2100,7 @@
@@ -1698,7 +2134,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##
@@ -2485,7 +2554,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## PrometheusRules to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1723,7 +2125,7 @@
@@ -1723,7 +2159,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##
@@ -2494,7 +2563,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
@@ -1743,7 +2145,7 @@
@@ -1743,7 +2179,7 @@
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##
@@ -2503,7 +2572,7 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
@@ -1840,9 +2242,13 @@
@@ -1840,9 +2276,13 @@
## Resource limits & requests
##
@@ -2520,3 +2589,75 @@ diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-monitoring/charts-original/val
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
@@ -1857,11 +2297,6 @@
# storage: 50Gi
# selector: {}
- # Additional volumes on the output StatefulSet definition.
- volumes: []
- # Additional VolumeMounts on the output StatefulSet definition.
- volumeMounts: []
-
## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
## as specified in the official Prometheus documentation:
@@ -1964,9 +2399,49 @@
##
thanos: {}
+ proxy:
+ image:
+ repository: rancher/library-nginx
+ tag: 1.19.2-alpine
+
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
## if using proxy extraContainer update targetPort with proxy container port
- containers: []
+ containers: |
+ - name: prometheus-proxy
+ args:
+ - nginx
+ - -g
+ - daemon off;
+ - -c
+ - /nginx/nginx.conf
+ image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}"
+ ports:
+ - containerPort: 8080
+ name: nginx-http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /nginx
+ name: prometheus-nginx
+ - mountPath: /var/cache/nginx
+ name: nginx-home
+ securityContext:
+ runAsUser: 101
+ runAsGroup: 101
+
+ # Additional volumes on the output StatefulSet definition.
+ volumes:
+ - name: nginx-home
+ emptyDir: {}
+ - name: prometheus-nginx
+ configMap:
+ name: prometheus-nginx-proxy-config
+ items:
+ - key: nginx.conf
+ mode: 438
+ path: nginx.conf
+
+ # Additional VolumeMounts on the output StatefulSet definition.
+ volumeMounts: []
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
## (permissions, dir tree) on mounted volumes before starting prometheus
@@ -1974,7 +2449,7 @@
## PortName to use for Prometheus.
##
- portName: "web"
+ portName: "nginx-http"
additionalServiceMonitors: []
## Name of the ServiceMonitor to create
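The equivalent path for Prometheus after these values changes (again a sketch; the Service keeps port 9090 but now targets the proxy container):

# Prometheus Service: port 9090, portName "nginx-http"
#   -> pod targetPort 8080 = prometheus-proxy (nginx) containerPort 8080
#        -> proxy_pass http://localhost:9090 = prometheus container (default port 9090)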