[dev-v2.9] rancher-monitoring 104.1.3-rc.1+up57.0.3 create (#4755)

pull/4765/head
Julia Bier 2024-11-14 12:53:16 -04:00 committed by GitHub
parent 7bcf93fc8b
commit 5300408370
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
587 changed files with 105338 additions and 37 deletions


@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/release-name: rancher-monitoring-crd
apiVersion: v2
description: Installs the CRDs for rancher-monitoring.
name: rancher-monitoring-crd
type: application
version: 104.1.3-rc.1+up57.0.3


@@ -0,0 +1,24 @@
# rancher-monitoring-crd
A Rancher chart that installs the CRDs used by rancher-monitoring.
## How does this chart work?
This chart marshals all of the CRD files placed in the `crd-manifest` directory into a ConfigMap that is installed onto a cluster alongside relevant RBAC (ServiceAccount, ClusterRoleBinding, ClusterRole, and PodSecurityPolicy).
Once the relevant dependent resources are installed / upgraded / rolled back, this chart executes a post-install / post-upgrade / post-rollback Job that:
- Patches any existing versions of the CRDs contained within the `crd-manifest` on the cluster to set `spec.preserveUnknownFields=false`; this step is required since, based on [Kubernetes docs](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning) and a [known workaround](https://github.com/kubernetes-sigs/controller-tools/issues/476#issuecomment-691519936), such CRDs cannot be upgraded normally from `apiextensions.k8s.io/v1beta1` to `apiextensions.k8s.io/v1`.
- Runs a `kubectl apply` on the CRDs that are contained within the crd-manifest ConfigMap to upgrade CRDs in the cluster
On an uninstall, this chart executes a separate post-delete Job that:
- Patches any existing versions of the CRDs contained within `crd-manifest` on the cluster to set `metadata.finalizers=[]`
- Runs a `kubectl delete` on the CRDs that are contained within the crd-manifest ConfigMap to clean up the CRDs from the cluster
Note: If the relevant CRDs already existed in the cluster at the time of install, this chart will absorb ownership of the lifecycle of those CRDs; therefore, on a `helm uninstall`, those CRDs will also be removed from the cluster alongside this chart.
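For reference, the patch applied in the first post-install / post-upgrade / post-rollback step has roughly this shape (a sketch of the target field only, not a full CRD):
```yaml
# Sketch: patch applied to each CRD in crd-manifest so it can be
# upgraded from apiextensions.k8s.io/v1beta1 to apiextensions.k8s.io/v1
# (unknown fields are pruned once this is set).
spec:
  preserveUnknownFields: false
```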
## Why can't we just place the CRDs in the templates/ directory of the main chart?
In Helm today, you cannot declare a CRD and declare a resource of that CRD's kind in templates/ without encountering a failure on render.
## [Helm 3] Why can't we just place the CRDs in the crds/ directory of the main chart?
The Helm 3 `crds/` directory only supports the installation of CRDs; unlike this chart, it does not support upgrading or removing them.


@@ -0,0 +1,30 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
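{{/*
Example rendered output of the two helpers above on Kubernetes 1.14+,
as consumed by the Job templates in this chart (a sketch, not part of
the rendered manifests):

nodeSelector:
  kubernetes.io/os: linux
tolerations:
- key: "cattle.io/os"
  value: "linux"
  effect: "NoSchedule"
  operator: "Equal"
*/}}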


@@ -0,0 +1,102 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-create
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": post-install, post-upgrade, post-rollback
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed
spec:
template:
metadata:
name: {{ .Chart.Name }}-create
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: false
runAsUser: 0
containers:
- name: create-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- >
echo "Applying CRDs...";
mkdir -p /etc/crd;
base64 -d /etc/config/crd-manifest.tgz.b64 | tar -xzv -C /etc/crd;
kubectl replace -Rf /etc/crd || kubectl create -Rf /etc/crd;
echo "Done!"
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{- toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{- toYaml .Values.tolerations | nindent 8 }}
{{- end }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-delete
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed
spec:
template:
metadata:
name: {{ .Chart.Name }}-delete
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: false
runAsUser: 0
containers:
- name: delete-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- >
echo "Deleting CRDs...";
mkdir -p /etc/crd;
base64 -d /etc/config/crd-manifest.tgz.b64 | tar -xzv -C /etc/crd;
kubectl delete --ignore-not-found=true -Rf /etc/crd;
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{- toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{- toYaml .Values.tolerations | nindent 8 }}
{{- end }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest


@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Chart.Name }}-manifest
namespace: {{ .Release.Namespace }}
data:
crd-manifest.tgz.b64:
{{- .Files.Get "files/crd-manifest.tgz" | b64enc | indent 4 }}


@@ -0,0 +1,76 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs: ['create', 'get', 'patch', 'delete', 'update', 'list']
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ .Chart.Name }}-manager
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Chart.Name }}-manager
subjects:
- kind: ServiceAccount
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'configMap'
- 'secret'
{{- end }}


@@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}


@@ -0,0 +1,17 @@
# Default values for rancher-monitoring-crd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
psp:
enabled: false
systemDefaultRegistry: ""
image:
repository: rancher/shell
tag: v0.2.1
nodeSelector: {}
tolerations: []
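# For example, a user could override these defaults at install time
# (a sketch; the registry, label, and taint values below are hypothetical):
#
# global:
#   cattle:
#     psp:
#       enabled: false
#     # Prepended to the image above, e.g. registry.example.com/rancher/shell:v0.2.1
#     systemDefaultRegistry: "registry.example.com"
# nodeSelector:
#   node-role.kubernetes.io/worker: "true"
# tolerations:
# - key: "node-role.kubernetes.io/control-plane"
#   operator: "Exists"
#   effect: "NoSchedule"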


@@ -0,0 +1,5 @@
root = true
[files/dashboards/*.json]
indent_size = 2
indent_style = space


@@ -0,0 +1,29 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# helm/charts
OWNERS
hack/
ci/
kube-prometheus-*.tgz
unittests/
files/dashboards/


@@ -0,0 +1,47 @@
# Changelog
All notable changes from the upstream Prometheus Operator chart will be added to this file.
## [Package Version 00] - 2020-07-19
### Added
- Added [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) as a dependency to the upstream Prometheus Operator chart to allow users to expose custom metrics from the default Prometheus instance deployed by this chart
- Removed `prometheus-operator/cleanup-crds.yaml` and `prometheus-operator/crds.yaml` from the Prometheus Operator upstream chart in favor of just using the CRD directory to install the CRDs.
- Added support for `rkeControllerManager`, `rkeScheduler`, `rkeProxy`, and `rkeEtcd` PushProx exporters for monitoring k8s components within RKE clusters
- Added support for a `k3sServer` PushProx exporter that monitors k3s server components (`kubeControllerManager`, `kubeScheduler`, and `kubeProxy`) within k3s clusters
- Added support for `kubeAdmControllerManager`, `kubeAdmScheduler`, `kubeAdmProxy`, and `kubeAdmEtcd` PushProx exporters for monitoring k8s components within kubeAdm clusters
- Added support for `rke2ControllerManager`, `rke2Scheduler`, `rke2Proxy`, and `rke2Etcd` PushProx exporters for monitoring k8s components within rke2 clusters
- Exposed `prometheus.prometheusSpec.ignoreNamespaceSelectors` on values.yaml and set it to `false` by default. This value instructs the default Prometheus server deployed with this chart to ignore the `namespaceSelector` field within any created ServiceMonitor or PodMonitor CRs that it selects. This prevents ServiceMonitors and PodMonitors from configuring the Prometheus scrape configuration to monitor resources outside the namespace that they are deployed in; if a user needs to have one ServiceMonitor / PodMonitor monitor resources within several namespaces (such as the resources that are used to monitor Istio in a default installation), they should not enable this option since it would require them to create one ServiceMonitor / PodMonitor CR per namespace that they would like to monitor. Relevant fields were also updated in the default README.md.
- Added `grafana.sidecar.dashboards.searchNamespace` to `values.yaml` with a default value of `cattle-dashboards`. The namespace provided should contain all ConfigMaps with the label `grafana_dashboard` and will be searched by the Grafana Dashboards sidecar for updates. The namespace specified is also created along with this deployment. All default dashboard ConfigMaps have been relocated from the deployment namespace to the namespace specified
- Added `monitoring-admin`, `monitoring-edit`, and `monitoring-view` default `ClusterRoles` to allow admins to assign roles to users to interact with Prometheus Operator CRs. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `ClusterRoleBinding` to bind these roles to a Subject to allow them to set up or view `ServiceMonitors` / `PodMonitors` / `PrometheusRules` and view `Prometheus` or `Alertmanager` CRs across the cluster (an example binding is sketched after this list). If `.Values.global.rbac.userRoles.aggregateRolesForRBAC` is enabled, these ClusterRoles will aggregate into the respective default ClusterRoles provided by Kubernetes.
- Added `monitoring-config-admin`, `monitoring-config-edit` and `monitoring-config-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `Secrets` and `ConfigMaps` within the `cattle-monitoring-system` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-monitoring-system` namespace to allow them to modify Secrets / ConfigMaps tied to the deployment, such as your Alertmanager Config Secret.
- Added `monitoring-dashboard-admin`, `monitoring-dashboard-edit` and `monitoring-dashboard-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `ConfigMaps` within the `cattle-dashboards` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`) and deploying Grafana as part of this chart. In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-dashboards` namespace to allow them to create / modify ConfigMaps that contain the JSON used to persist Grafana Dashboards on the cluster.
- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter`
- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`.
- Added support for private registries by introducing a new field, `global.cattle.systemDefaultRegistry`, that, if supplied, is automatically prepended onto every image used by the chart.
- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. The purpose of this container is to make it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is also where the Grafana service will now point to, and will forward all requests to the Grafana container listening on the default port `3000`.
- Added a default `nginx` proxy container deployed with Prometheus whose config is set in the `ConfigMap` located in `templates/prometheus/nginx-config.yaml`. The purpose of this container is to make it possible to view Prometheus's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8081` (with a `portName` of `nginx-http` instead of the default `web`), which is also where the Prometheus service will now point to, and will forward all requests to the Prometheus container listening on the default port `9090`.
- Added support for passing CIS Scans in a hardened cluster by introducing a Job that patches the default service account within the `cattle-monitoring-system` and `cattle-dashboards` namespaces on install or upgrade and adding a default allow all `NetworkPolicy` to the `cattle-monitoring-system` and `cattle-dashboards` namespaces.
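As an illustration of the `ClusterRoleBinding` setup described for the default user roles above (the binding and subject names are hypothetical):
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: alice-monitoring-view    # hypothetical binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: monitoring-view          # one of the default roles added above
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: alice                    # hypothetical user
```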
### Modified
- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml`
- Modified the default `node-exporter` port from `9100` to `9796`
- Modified the default `nameOverride` to `rancher-monitoring`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Modified the default `namespaceOverride` to `cattle-monitoring-system`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified (see the sketch at the end of this changelog)
- Configured some default values for `grafana.service` and exposed them in the default README.md
- The default namespaces for the following ServiceMonitors were changed from the deployment namespace to allow them to continue to monitor metrics when `prometheus.prometheusSpec.ignoreNamespaceSelectors` is enabled:
- `core-dns`: `kube-system`
- `api-server`: `default`
- `kube-controller-manager`: `kube-system`
- `kubelet`: `{{ .Values.kubelet.namespace }}`
- Disabled the following deployments by default (can be enabled if required):
- `AlertManager`
- `kube-controller-manager` metrics exporter
- `kube-etcd` metrics exporter
- `kube-scheduler` metrics exporter
- `kube-proxy` metrics exporter
- Updated default Grafana `deploymentStrategy` to `Recreate` to prevent deployments from being stuck on upgrade if a PV is attached to Grafana
- Modified the default `<serviceMonitor|podMonitor|rule>SelectorNilUsesHelmValues` to default to `false`. As a result, we look for all CRs with any labels in all namespaces by default rather than just the ones tagged with the label `release: rancher-monitoring`.
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. The benefit of this approach is that all changes to the Config Secret done on a live cluster will never get overridden on a `helm upgrade` since the secret only gets created on a `helm install`. If you would like the secret to be cleaned up on a `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; however, this is disabled by default to prevent the loss of alerting configuration on an uninstall. This secret will never be modified on a `helm upgrade`.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": "true", "runAsUser": "1000"}` and replaced `grafana.rbac.pspUseAppArmor` in favor of `grafana.rbac.pspAnnotations={}` in order to make it possible to deploy this chart on a hardened cluster which does not support Seccomp or AppArmor annotations in PSPs. Users can always choose to specify the annotations they want to use for the PSP directly as part of the values provided.
- Modified `.Values.prometheus.prometheusSpec.containers` to take in a string representing a template that should be rendered by Helm (via `tpl`) instead of allowing a user to provide YAML directly.
- Modified the default Grafana configuration to auto assign users who access Grafana to the Viewer role and enable anonymous access to Grafana dashboards by default. This default works well for a Rancher user who is accessing Grafana via the `kubectl proxy` on the Rancher Dashboard UI since anonymous users who enter via the proxy are authenticated by the k8s API Server, but you can / should modify this behavior if you plan on exposing Grafana in a way that does not require authentication (e.g. as a `NodePort` service).
- Modified the default Grafana configuration to add a default dashboard for Rancher on the Grafana home page.
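To make the `nameOverride` / `namespaceOverride` constraint above concrete, with the defaults described in this changelog the Prometheus Adapter URL resolves as follows (a sketch; if either override changes, the URL must be updated to match):
```yaml
nameOverride: rancher-monitoring
namespaceOverride: cattle-monitoring-system
prometheus-adapter:
  prometheus:
    # http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc
    url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc
```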


@@ -0,0 +1,12 @@
# Contributing Guidelines
## How to contribute to this chart
1. Fork this repository, develop and test your Chart.
1. Bump the chart version for every change.
1. Ensure the PR title has the prefix `[kube-prometheus-stack]`
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
1. The `hack/minikube` folder has scripts to set up minikube and components of this chart so that all components can be scraped. You can use this configuration when validating your changes.
1. Check for changes of RBAC rules.
1. Check for changes in CRD specs.
1. PR must pass the linter (`helm lint`)


@@ -0,0 +1,126 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
- name: Upstream Project
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
catalog.cattle.io/auto-install: rancher-monitoring-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/deploys-on-os: windows
catalog.cattle.io/display-name: Monitoring
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: rancher-monitoring
catalog.cattle.io/requests-cpu: 4500m
catalog.cattle.io/requests-memory: 4000Mi
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: monitoring
catalog.cattle.io/upstream-version: 57.0.3
apiVersion: v2
appVersion: v0.72.0
dependencies:
- condition: grafana.enabled
name: grafana
repository: file://./charts/grafana
- condition: hardenedKubelet.enabled
name: hardenedKubelet
repository: file://./charts/hardenedKubelet
- condition: hardenedNodeExporter.enabled
name: hardenedNodeExporter
repository: file://./charts/hardenedNodeExporter
- condition: k3sServer.enabled
name: k3sServer
repository: file://./charts/k3sServer
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: file://./charts/kube-state-metrics
- condition: kubeAdmControllerManager.enabled
name: kubeAdmControllerManager
repository: file://./charts/kubeAdmControllerManager
- condition: kubeAdmEtcd.enabled
name: kubeAdmEtcd
repository: file://./charts/kubeAdmEtcd
- condition: kubeAdmProxy.enabled
name: kubeAdmProxy
repository: file://./charts/kubeAdmProxy
- condition: kubeAdmScheduler.enabled
name: kubeAdmScheduler
repository: file://./charts/kubeAdmScheduler
- condition: prometheus-adapter.enabled
name: prometheus-adapter
repository: file://./charts/prometheus-adapter
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: file://./charts/prometheus-node-exporter
- condition: rke2ControllerManager.enabled
name: rke2ControllerManager
repository: file://./charts/rke2ControllerManager
- condition: rke2Etcd.enabled
name: rke2Etcd
repository: file://./charts/rke2Etcd
- condition: rke2IngressNginx.enabled
name: rke2IngressNginx
repository: file://./charts/rke2IngressNginx
- condition: rke2Proxy.enabled
name: rke2Proxy
repository: file://./charts/rke2Proxy
- condition: rke2Scheduler.enabled
name: rke2Scheduler
repository: file://./charts/rke2Scheduler
- condition: rkeControllerManager.enabled
name: rkeControllerManager
repository: file://./charts/rkeControllerManager
- condition: rkeEtcd.enabled
name: rkeEtcd
repository: file://./charts/rkeEtcd
- condition: rkeIngressNginx.enabled
name: rkeIngressNginx
repository: file://./charts/rkeIngressNginx
- condition: rkeProxy.enabled
name: rkeProxy
repository: file://./charts/rkeProxy
- condition: rkeScheduler.enabled
name: rkeScheduler
repository: file://./charts/rkeScheduler
- condition: windowsExporter.enabled
name: windowsExporter
repository: file://./charts/windowsExporter
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
Operator.
home: https://github.com/prometheus-operator/kube-prometheus
icon: file://assets/logos/rancher-monitoring.png
keywords:
- operator
- prometheus
- kube-prometheus
kubeVersion: '>=1.19.0-0'
maintainers:
- email: andrew@quadcorps.co.uk
name: andrewgkew
- email: gianrubio@gmail.com
name: gianrubio
- email: github.gkarthiks@gmail.com
name: gkarthiks
- email: kube-prometheus-stack@sisti.pt
name: GMartinez-Sisti
- email: github@jkroepke.de
name: jkroepke
- email: scott@r6by.com
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: quentin.bisson@gmail.com
name: QuentinBisson
name: rancher-monitoring
sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 104.1.3-rc.1+up57.0.3

File diff suppressed because it is too large


@@ -0,0 +1,46 @@
# Rancher Monitoring and Alerting
This chart is based on the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) chart. The chart deploys [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) and its CRDs along with [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana), [Prometheus Adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter) and additional charts / Kubernetes manifests to gather metrics. It allows users to monitor their Kubernetes clusters, view metrics in Grafana dashboards, and set up alerts and notifications.
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/).
The chart installs the following components:
- [Prometheus Operator](https://github.com/coreos/prometheus-operator) - The operator provides easy monitoring definitions for Kubernetes services, manages [Prometheus](https://prometheus.io/) and [AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/) instances, and adds default scrape targets for some Kubernetes components.
- [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) - A collection of community-curated Kubernetes manifests, Grafana Dashboards, and PrometheusRules that deploy a default end-to-end cluster monitoring configuration.
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana) - Grafana allows a user to create / view dashboards based on the cluster metrics collected by Prometheus.
- [node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) / [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) / [rancher-pushprox](https://github.com/rancher/charts/tree/dev-v2.7/packages/rancher-monitoring/rancher-pushprox/charts) - These charts monitor various Kubernetes components across different Kubernetes cluster types.
- [Prometheus Adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter) - The adapter allows a user to expose custom metrics, resource metrics, and external metrics on the default [Prometheus](https://prometheus.io/) instance to the Kubernetes API Server.
For more information, review the Helm README of this chart.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, all previous fields associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
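A minimal values override for the in-place upgrade described above:
```yaml
# Disable PSP resources before (or when) moving to Kubernetes v1.25+
global:
  cattle:
    psp:
      enabled: false
```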
## Upgrading from 100.0.0+up16.6.0 to 100.1.0+up19.0.3
### Noticeable changes:
Grafana:
- `sidecar.dashboards.searchNamespace`, `sidecar.datasources.searchNamespace` and `sidecar.notifiers.searchNamespace` support a list of namespaces now.
Kube-state-metrics:
- The type of `collectors` was changed from Dictionary to List (sketched below).
- `kubeStateMetrics.serviceMonitor.namespaceOverride` was replaced by `kube-state-metrics.namespaceOverride`.
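A sketch of the `collectors` type change (the collector name is illustrative):
```yaml
# Before (100.0.0+up16.6.0): collectors as a Dictionary
kube-state-metrics:
  collectors:
    certificatesigningrequests: true
---
# After (100.1.0+up19.0.3): collectors as a List
kube-state-metrics:
  collectors:
    - certificatesigningrequests
```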
### Known issues:
- Occasionally, the upgrade fails with errors related to the webhook `prometheusrulemutate.monitoring.coreos.com`. This is a known issue in the upstream, and the workaround is to trigger the upgrade one more time. [32416](https://github.com/rancher/rancher/issues/32416#issuecomment-828881726)


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.vscode
.project
.idea/
*.tmproj
OWNERS


@@ -0,0 +1,39 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/grafana/helm-charts
- name: Upstream Project
url: https://github.com/grafana/grafana
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-grafana
apiVersion: v2
appVersion: 10.4.1
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.com
icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116
keywords:
- monitoring
- metric
kubeVersion: '>=1.26.0-0'
maintainers:
- email: zanhsieh@gmail.com
name: zanhsieh
- email: rluckie@cisco.com
name: rtluckie
- email: maor.friedman@redhat.com
name: maorfr
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: mail@torstenwalter.de
name: torstenwalter
name: grafana
sources:
- https://github.com/grafana/grafana
- https://github.com/grafana/helm-charts
type: application
version: 7.3.11


@@ -0,0 +1,770 @@
# Grafana Helm Chart
* Installs the web dashboarding system [Grafana](http://grafana.org/)
## Get Repo Info
```console
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release grafana/grafana
```
## Uninstalling the Chart
To uninstall/delete the my-release deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### To 4.0.0 (And 3.12.1)
This version requires Helm >= 2.12.0.
### To 5.0.0
You have to add `--force` to your `helm upgrade` command as the labels of the chart have changed.
### To 6.0.0
This version requires Helm >= 3.1.0.
### To 7.0.0
For consistency with other Helm charts, the `global.image.registry` parameter was renamed
to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action
is required on upgrade. If you were previously setting `global.image.registry`, you will
need to instead set `global.imageRegistry`.
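In values terms, the rename looks like this (the registry value is illustrative):
```yaml
# Before (chart < 7.0.0)
global:
  image:
    registry: registry.example.com
---
# After (chart >= 7.0.0)
global:
  imageRegistry: registry.example.com
```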
## Configuration
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` |
| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
| `image.registry` | Image registry | `docker.io` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` |
| `image.sha` | Image sha (optional) | `` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` |
| `service.enabled` | Enable grafana service | `true` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.portName` | Name of the port on the service | `service` |
| `service.appProtocol` | Adds the appProtocol field to the service | `` |
| `service.targetPort` | Internal port the service targets | `3000` |
| `service.nodePort` | Kubernetes service nodePort | `nil` |
| `service.annotations` | Service annotations (can be templated) | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.clusterIP` | internal cluster service IP | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
| `service.externalIPs` | service external IP addresses | `[]` |
| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` |
| `headlessService` | Create a headless service | `false` |
| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress accepted path | `/` |
| `ingress.pathType` | Ingress type of path | `Prefix` |
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `ingress.ingressClassName` | Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 | `""` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
| `extraContainers` | Sidecar containers to add to the grafana pod | `""` |
| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
| `extraLabels` | Custom labels for all manifests | `{}` |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` |
| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` |
| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
| `initChownData.enabled` | If false, don't reset data ownership at startup | true |
| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` |
| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
| `schedulerName` | Alternate scheduler name | `nil` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` |
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` |
| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` |
| `envRenderSecret` | Sensitive environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` |
| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraVolumes` | Additional Grafana server volumes | `[]` |
| `automountServiceAccountToken` | Mount the service account token on the grafana pod. Mandatory if sidecars are enabled | `true` |
| `createConfigmap` | Enable creating the grafana configmap | `true` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
| `alerting` | Configure grafana alerting (passed through tpl) | `{}` |
| `notifiers` | Configure grafana notifiers | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `global.imageRegistry` | Global image pull registry for all images. | `null` |
| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` |
| `ldap.enabled` | Enable LDAP authentication | `false` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
| `ldap.config` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `labels` | Deployment labels | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Pod labels | `{}` |
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` |
| `sidecar.image.registry` | Sidecar image registry | `quay.io` |
| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `1.26.0` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
| `sidecar.securityContext` | Sidecar securityContext | `{}` |
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` |
| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` |
| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` |
| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` |
| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.alerts.resource` | Whether the sidecar should look at secrets, configmaps, or both. | `both` |
| `sidecar.alerts.reloadURL` | Full url of the alert configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.alerts.initAlerts` | Set to true to deploy the alerts sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
| `sidecar.dashboards.provider.type` | Provider type | `file` |
| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` |
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.dashboards.script` | Absolute path to a shell script to execute after a configmap is reloaded. | `nil` |
| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` |
| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.dashboards.resource` | Whether the sidecar should look at secrets, configmaps, or both. | `both` |
| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.datasources.resource` | Whether the sidecar should look at secrets, configmaps, or both. | `both` |
| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.notifiers.resource` | Whether the sidecar should look at secrets, configmaps, or both. | `both` |
| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated; see the example below this table). | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.automountServiceAccountToken` | Automount the service account token on all pods where this service account is used | `false` |
| `serviceAccount.annotations` | ServiceAccount annotations | |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.labels` | ServiceAccount labels | `{}` |
| `serviceAccount.name` | Service account name to use; when empty, it is set to the created account if `serviceAccount.create` is set, else to `default` | `` |
| `serviceAccount.nameTest` | Service account name to use for the test; when empty, it is set to the created account if `serviceAccount.create` is set, else to `default` | `nil` |
| `rbac.create` | Create and use RBAC resources | `true` |
| `rbac.namespaced` | Creates a Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance | `false` |
| `rbac.useExistingRole` | Set to the name of an existing role to skip role creation while still creating the service account and a rolebinding to the role named here. | `nil` |
| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` |
| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` |
| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
| `command` | Define command to be executed by grafana container at startup | `nil` |
| `args` | Define additional args if command is used | `nil` |
| `testFramework.enabled` | Whether to create test-related resources | `true` |
| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` |
| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` |
| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` |
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` |
| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` |
| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` |
| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
| `serviceMonitor.path` | Path to scrape | `/metrics` |
| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` |
| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` |
| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` |
| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` |
| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
| `imageRenderer.podAnnotations` | image-renderer pod annotations | `{}` |
| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` |
| `imageRenderer.service.portName` | image-renderer service port name | `http` |
| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` |
| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` |
| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` |
| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` |
| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` |
| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` |
| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` |
| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` |
| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` |
| `enableKubeBackwardCompatibility` | Enable backward compatibility with Kubernetes versions below 1.13, whose pod definitions do not support the `enableServiceLinks` option | `false` |
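For example, a minimal values sketch that enables the ServiceMonitor from the table above so the Prometheus Operator scrapes Grafana's metrics (the `release: prometheus` label is an assumption; it must match your Prometheus instance's `serviceMonitorSelector`):
```yaml
serviceMonitor:
  enabled: true
  interval: 1m
  scrapeTimeout: 30s
  labels:
    release: prometheus # assumed; must match your Prometheus serviceMonitorSelector
```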
### Example ingress with path
With Grafana 6.3 and above:
```yaml
grafana.ini:
server:
domain: monitoring.example.com
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
hosts:
- "monitoring.example.com"
path: "/grafana"
```
### Example of extraVolumeMounts and extraVolumes
Configure additional volumes with `extraVolumes` and volume mounts with `extraVolumeMounts`.
Example for `extraVolumeMounts` and corresponding `extraVolumes`:
```yaml
extraVolumeMounts:
- name: plugins
mountPath: /var/lib/grafana/plugins
subPath: configs/grafana/plugins
readOnly: false
- name: dashboards
mountPath: /var/lib/grafana/dashboards
hostPath: /usr/shared/grafana/dashboards
readOnly: false
extraVolumes:
- name: plugins
existingClaim: existing-grafana-claim
- name: dashboards
hostPath: /usr/shared/grafana/dashboards
```
Volumes default to `emptyDir`. Set to `persistentVolumeClaim`,
`hostPath`, `csi`, or `configMap` for other types. For a
`persistentVolumeClaim`, specify an existing claim name with
`existingClaim`.
## Import dashboards
There are a few methods to import dashboards into Grafana. Below are some examples and explanations of how to use each method:
```yaml
dashboards:
default:
some-dashboard:
json: |
{
"annotations":
...
# Complete json file here
...
"title": "Some Dashboard",
"uid": "abcd1234",
"version": 1
}
custom-dashboard:
# This is a path to a file inside the dashboards directory inside the chart directory
file: dashboards/custom-dashboard.json
prometheus-stats:
# Ref: https://grafana.com/dashboards/2
gnetId: 2
revision: 2
datasource: Prometheus
loki-dashboard-quick-search:
gnetId: 12019
revision: 2
datasource:
- name: DS_PROMETHEUS
value: Prometheus
- name: DS_LOKI
value: Loki
local-dashboard:
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
```
## BASE64 dashboards
Dashboards can be stored on a server that does not return JSON directly but instead returns a Base64-encoded file (e.g. Gerrit).
For this use case, a new parameter has been added to the `url` import method: if you specify `b64content: true` alongside the `url` entry, Base64 decoding is applied before the file is saved to disk.
If this entry is not set, or is set to `false`, no decoding is applied to the file before saving it to disk.
### Gerrit use case
The Gerrit API for downloading files has the following schema: <https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content>, where {project-name} and
{file-id} usually contain '/' in their values, so those characters MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master, and file-id is dir1/dir2/dashboard,
the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>
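As a sketch, assuming the Gerrit URL above, the corresponding dashboard entry would be:
```yaml
dashboards:
  default:
    gerrit-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      b64content: true
```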
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
dashboards are deleted/updated.
A recommendation is to use one configmap per dashboard, as removing one of several dashboards stored in a
single configmap is currently not properly reflected in grafana.
Example dashboard config:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-dashboard
labels:
grafana_dashboard: "1"
data:
k8s-dashboard.json: |-
[...]
```
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported.
If you want Grafana to reload datasources each time the config changes, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://<svc-name>.<namespace>.svc.cluster.local/api/admin/provisioning/datasources/reload`.
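A minimal values sketch, assuming a service named `grafana` in the `monitoring` namespace (substitute your own service name and namespace):
```yaml
sidecar:
  datasources:
    enabled: true
    skipReload: false
    # assumed service name and namespace; substitute your own
    reloadURL: http://grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload
```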
Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example values to add a postgres datasource as a kubernetes secret:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: grafana-datasources
labels:
grafana_datasource: 'true' # default value for: sidecar.datasources.label
stringData:
pg-db.yaml: |-
apiVersion: 1
datasources:
- name: My pg db datasource
type: postgres
url: my-postgresql-db:5432
user: db-readonly-user
secureJsonData:
password: 'SUperSEcretPa$$word'
jsonData:
          database: my_database
sslmode: 'disable' # disable/require/verify-ca/verify-full
maxOpenConns: 0 # Grafana v5.4+
maxIdleConns: 2 # Grafana v5.4+
connMaxLifetime: 14400 # Grafana v5.4+
postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10
timescaledb: false
# <bool> allow users to edit datasources from the UI.
editable: false
```
Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```yaml
datasources:
datasources.yaml:
apiVersion: 1
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://localhost:8080
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault:
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.1"
tlsAuth: true
tlsAuthWithCACert: true
# <string> json object of data that will be encrypted.
secureJsonData:
tlsCACert: "..."
tlsClientCert: "..."
tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
```
## Sidecar for notifiers
If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the notification channels in grafana can be imported. The secrets must be created before
`helm install` so that the notifiers init container can list the secrets.
Secrets are recommended over configmaps for this use case because alert notification channels usually contain
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example notifiers config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
```yaml
notifiers:
- name: notification-channel-1
type: slack
uid: notifier1
# either
org_id: 2
# or
org_name: Main Org.
is_default: true
send_reminder: true
frequency: 1h
disable_resolve_message: false
    # See `Supported Settings` section for settings supported for each
    # alert notification type.
settings:
recipient: 'XXX'
token: 'xoxb'
uploadImage: true
url: https://slack.com
delete_notifiers:
- name: notification-channel-1
uid: notifier1
org_id: 2
- name: notification-channel-2
# default org_id: 1
```
## Sidecar for alerting resources
If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with
a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated; deletions, however, are a little more complicated (see below).
This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/).
To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)).
You can use either JSON or YAML format.
Example config for an alert rule:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-alert
labels:
grafana_alert: "1"
data:
k8s-alert.yml: |-
apiVersion: 1
groups:
- orgId: 1
name: k8s-alert
[...]
```
Deleting provisioned alert rules is a two-step process: first delete the configmap that defined the alert rule,
then create a configuration which deletes the alert rule.
Example deletion configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: delete-sample-grafana-alert
namespace: monitoring
labels:
grafana_alert: "1"
data:
delete-k8s-alert.yml: |-
apiVersion: 1
deleteRules:
- orgId: 1
uid: 16624780-6564-45dc-825c-8bded4ad92d3
```
## Statically provision alerting resources
If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly, you can use the `alerting` config option instead of the sidecar option above.
This grabs the alerting config and applies it statically at Helm render time.
There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations of how to use each method:
```yaml
alerting:
team1-alert-rules.yaml:
file: alerting/team1/rules.yaml
team2-alert-rules.yaml:
file: alerting/team2/rules.yaml
team3-alert-rules.yaml:
file: alerting/team3/rules.yaml
notification-policies.yaml:
file: alerting/shared/notification-policies.yaml
notification-templates.yaml:
file: alerting/shared/notification-templates.yaml
contactpoints.yaml:
apiVersion: 1
contactPoints:
- orgId: 1
name: Slack channel
receivers:
- uid: default-receiver
type: slack
settings:
# Webhook URL to be filled in
url: ""
# We need to escape double curly braces for the tpl function.
text: '{{ `{{ template "default.message" . }}` }}'
title: '{{ `{{ template "default.title" . }}` }}'
```
The two possibilities for static alerting resource provisioning are:
* Inlining the file contents as shown for contact points in the above example.
* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example.
### Important notes on file provisioning
* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning.
* The chart supports importing YAML and JSON files.
* The filename must be unique, otherwise one volume mount will overwrite the other.
* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped.
* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance.
* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases.
## How to serve Grafana with a path prefix (/grafana)
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
```yaml
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
path: /grafana/?(.*)
hosts:
- k8s.example.dev
grafana.ini:
server:
root_url: http://localhost:3000/grafana # this host can be localhost
```
## How to securely reference secrets in grafana.ini
This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets.
In grafana.ini:
```yaml
grafana.ini:
  auth.generic_oauth:
    enabled: true
    client_id: $__file{/etc/secrets/auth_generic_oauth/client_id}
    client_secret: $__file{/etc/secrets/auth_generic_oauth/client_secret}
```
Existing secret, or created along with helm:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: auth-generic-oauth-secret
type: Opaque
stringData:
client_id: <value>
client_secret: <value>
```
Include in the `extraSecretMounts` configuration flag:
```yaml
extraSecretMounts:
  - name: auth-generic-oauth-secret-mount
    secretName: auth-generic-oauth-secret
    defaultMode: 0440
    mountPath: /etc/secrets/auth_generic_oauth
    readOnly: true
```
### extraSecretMounts using a Container Storage Interface (CSI) provider
This example uses a CSI driver, e.g. retrieving secrets using the [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure):
```yaml
extraSecretMounts:
  - name: secrets-store-inline
    mountPath: /run/secrets
    readOnly: true
    csi:
      driver: secrets-store.csi.k8s.io
      readOnly: true
      volumeAttributes:
        secretProviderClass: "my-provider"
      nodePublishSecretRef:
        name: akv-creds
```
## Image Renderer Plug-In
This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker):
```yaml
imageRenderer:
enabled: true
```
### Image Renderer NetworkPolicy
By default, the image-renderer pods have a NetworkPolicy which only allows ingress traffic from the created grafana instance.
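To also restrict the renderer's outbound traffic to the grafana pods, a values sketch could look like:
```yaml
imageRenderer:
  enabled: true
  networkPolicy:
    limitIngress: true # chart default
    limitEgress: true  # default is false
```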
### High Availability for unified alerting
If you want to run Grafana in a high availability cluster you need to enable
the headless service by setting `headlessService: true` in your `values.yaml`
file.
As a next step, set up `grafana.ini` in your `values.yaml` so that it uses the headless
service to obtain all the IPs of the cluster. You should replace ``{{ Name }}`` with the
name of your helm deployment.
```yaml
grafana.ini:
...
unified_alerting:
enabled: true
ha_peers: {{ Name }}-headless:9094
ha_listen_address: ${POD_IP}:9094
ha_advertise_address: ${POD_IP}:9094
alerting:
enabled: false
```
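Putting both settings together, a minimal sketch for a release named `grafana` (so the headless service resolves to `grafana-headless`; the replica count is an arbitrary example) could be:
```yaml
replicas: 3
headlessService: true
grafana.ini:
  unified_alerting:
    enabled: true
    ha_peers: grafana-headless:9094
    ha_listen_address: ${POD_IP}:9094
    ha_advertise_address: ${POD_IP}:9094
  alerting:
    enabled: false
```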

View File

@ -0,0 +1,55 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local
{{ if .Values.ingress.enabled }}
If you bind grafana to 80, please update values in values.yaml and reinstall:
```
securityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
command:
- "setcap"
- "'cap_net_bind_service=+ep'"
- "/usr/sbin/grafana-server &&"
- "sh"
- "/run.sh"
```
For details, refer to https://grafana.com/docs/installation/configuration/#http-port.
Otherwise, Grafana will crash on startup.
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
http://{{ . }}
{{- end }}
{{- else }}
Get the Grafana URL to visit by running these commands in the same shell:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
http://$SERVICE_IP:{{ .Values.service.port -}}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000
{{- end }}
{{- end }}
3. Log in with the password from step 1 and the username: {{ .Values.adminUser }}
{{- if not .Values.persistence.enabled }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
{{- end }}

View File

@ -0,0 +1,171 @@
{{/*
Generate config map data
*/}}
{{- define "grafana.configData" -}}
{{ include "grafana.assertNoLeakedSecrets" . }}
{{- $files := .Files }}
{{- $root := . -}}
{{- with .Values.plugins }}
plugins: {{ join "," . }}
{{- end }}
grafana.ini: |
{{- range $elem, $elemVal := index .Values "grafana.ini" }}
{{- if not (kindIs "map" $elemVal) }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := index .Values "grafana.ini" }}
{{- if kindIs "map" $value }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.datasources }}
{{- if not (hasKey $value "secret") }}
{{ $key }}: |
{{- tpl (toYaml $value | nindent 2) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if not (hasKey $value "secret") }}
{{ $key }}: |
{{- toYaml $value | nindent 2 }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{- if (hasKey $value "file") }}
{{ $key }}:
{{- toYaml ( $files.Get $value.file ) | nindent 2 }}
{{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}}
{{/* will be stored inside secret generated by "configSecret.yaml"*/}}
{{- else }}
{{ $key }}: |
{{- tpl (toYaml $value | nindent 2) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{- toYaml $value | nindent 2 }}
{{- end }}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{ $dashboardProviders := .Values.dashboardProviders }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
{{- if not $value.acceptHeader }}
-H "Accept: application/json" \
{{- else }}
-H "Accept: {{ $value.acceptHeader }}" \
{{- end }}
{{- if $value.token }}
-H "Authorization: token {{ $value.token }}" \
{{- end }}
{{- if $value.bearerToken }}
-H "Authorization: Bearer {{ $value.bearerToken }}" \
{{- end }}
{{- if $value.basic }}
-H "Authorization: Basic {{ $value.basic }}" \
{{- end }}
{{- if $value.gitlabToken }}
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
{{- end }}
-H "Content-Type: application/json;charset=UTF-8" \
{{- end }}
{{- $dpPath := "" -}}
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }}
{{- if eq $kd.name $provider }}
{{- $dpPath = $kd.options.path }}
{{- end }}
{{- end }}
{{- if $value.url }}
"{{ $value.url }}" \
{{- else }}
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
{{- end }}
{{- if $value.datasource }}
{{- if kindIs "string" $value.datasource }}
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
{{- end }}
{{- if kindIs "slice" $value.datasource }}
{{- range $value.datasource }}
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
{{- end }}
{{- end }}
{{- end }}
{{- if $value.b64content }}
| base64 -d \
{{- end }}
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
{{ end }}
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Generate dashboard json config map data
*/}}
{{- define "grafana.configDashboardProviderData" -}}
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
{{- end }}
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
options:
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- end -}}
{{- define "grafana.secretsData" -}}
{{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ include "grafana.password" . }}
{{- end }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,305 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create the name of the service account
*/}}
{{- define "grafana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- define "grafana.serviceAccountNameTest" -}}
{{- if .Values.serviceAccount.create }}
{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }}
{{- else }}
{{- default "default" .Values.serviceAccount.nameTest }}
{{- end }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "grafana.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "grafana.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.extraLabels }}
{{ toYaml . }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "grafana.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "grafana.imageRenderer.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.imageRenderer.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels ImageRenderer
*/}}
{{- define "grafana.imageRenderer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Looks if there's an existing secret and reuse its password. If not it generates
new password and use it.
*/}}
{{- define "grafana.password" -}}
{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }}
{{- if $secret }}
{{- index $secret "data" "admin-password" }}
{{- else }}
{{- (randAlphaNum 40) | b64enc | quote }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "grafana.rbac.apiVersion" -}}
{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" }}
{{- else }}
{{- print "rbac.authorization.k8s.io/v1beta1" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "grafana.ingress.apiVersion" -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }}
{{- print "networking.k8s.io/v1" }}
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
{{- print "networking.k8s.io/v1beta1" }}
{{- else }}
{{- print "extensions/v1beta1" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "grafana.hpa.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
{{- print "autoscaling/v2" }}
{{- else }}
{{- print "autoscaling/v2beta2" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for podDisruptionBudget.
*/}}
{{- define "grafana.podDisruptionBudget.apiVersion" -}}
{{- if $.Values.podDisruptionBudget.apiVersion }}
{{- print $.Values.podDisruptionBudget.apiVersion }}
{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
{{- print "policy/v1" }}
{{- else }}
{{- print "policy/v1beta1" }}
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "grafana.ingress.isStable" -}}
{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }}
{{- end }}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "grafana.ingress.supportsIngressClassName" -}}
{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "grafana.ingress.supportsPathType" -}}
{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets})
*/}}
{{- define "grafana.imagePullSecrets" -}}
{{- $root := .root }}
{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }}
{{- if eq (typeOf .) "map[string]interface {}" }}
- {{ toYaml (dict "name" (tpl .name $root)) | trim }}
{{- else }}
- name: {{ tpl . $root }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Checks whether or not the configSecret secret has to be created
*/}}
{{- define "grafana.shouldCreateConfigSecret" -}}
{{- $secretFound := false -}}
{{- range $key, $value := .Values.datasources }}
{{- if hasKey $value "secret" }}
{{- $secretFound = true}}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if hasKey $value "secret" }}
{{- $secretFound = true}}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }}
{{- $secretFound = true}}
{{- end }}
{{- end }}
{{- $secretFound}}
{{- end -}}
{{/*
Checks whether the user is attempting to store secrets in plaintext
in the grafana.ini configmap
*/}}
{{/* grafana.assertNoLeakedSecrets checks for sensitive keys in values */}}
{{- define "grafana.assertNoLeakedSecrets" -}}
{{- $sensitiveKeysYaml := `
sensitiveKeys:
- path: ["database", "password"]
- path: ["smtp", "password"]
- path: ["security", "secret_key"]
- path: ["security", "admin_password"]
- path: ["auth.basic", "password"]
- path: ["auth.ldap", "bind_password"]
- path: ["auth.google", "client_secret"]
- path: ["auth.github", "client_secret"]
- path: ["auth.gitlab", "client_secret"]
- path: ["auth.generic_oauth", "client_secret"]
- path: ["auth.okta", "client_secret"]
- path: ["auth.azuread", "client_secret"]
- path: ["auth.grafana_com", "client_secret"]
- path: ["auth.grafananet", "client_secret"]
- path: ["azure", "user_identity_client_secret"]
- path: ["unified_alerting", "ha_redis_password"]
- path: ["metrics", "basic_auth_password"]
- path: ["external_image_storage.s3", "secret_key"]
- path: ["external_image_storage.webdav", "password"]
- path: ["external_image_storage.azure_blob", "account_key"]
` | fromYaml -}}
{{- if $.Values.assertNoLeakedSecrets -}}
{{- $grafanaIni := index .Values "grafana.ini" -}}
{{- range $_, $secret := $sensitiveKeysYaml.sensitiveKeys -}}
{{- $currentMap := $grafanaIni -}}
{{- $shouldContinue := true -}}
{{- range $index, $elem := $secret.path -}}
{{- if and $shouldContinue (hasKey $currentMap $elem) -}}
{{- if eq (len $secret.path) (add1 $index) -}}
{{- if not (regexMatch "\\$(?:__(?:env|file|vault))?{[^}]+}" (index $currentMap $elem)) -}}
{{- fail (printf "Sensitive key '%s' should not be defined explicitly in values. Use variable expansion instead. You can disable this client-side validation by changing the value of assertNoLeakedSecrets." (join "." $secret.path)) -}}
{{- end -}}
{{- else -}}
{{- $currentMap = index $currentMap $elem -}}
{{- end -}}
{{- else -}}
{{- $shouldContinue = false -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,25 @@
{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
rules:
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end}}
{{- with .Values.rbac.extraClusterRoleRules }}
{{- toYaml . | nindent 2 }}
{{- end}}
{{- else }}
rules: []
{{- end}}
{{- end}}

View File

@ -0,0 +1,24 @@
{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "grafana.fullname" . }}-clusterrolebinding
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "grafana.serviceAccountName" . }}
namespace: {{ include "grafana.namespace" . }}
roleRef:
kind: ClusterRole
{{- if .Values.rbac.useExistingClusterRole }}
name: {{ .Values.rbac.useExistingClusterRole }}
{{- else }}
name: {{ include "grafana.fullname" . }}-clusterrole
{{- end }}
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -0,0 +1,43 @@
{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}}
{{- if and .Values.createConfigmap $createConfigSecret }}
{{- $files := .Files }}
{{- $root := . -}}
apiVersion: v1
kind: Secret
metadata:
name: "{{ include "grafana.fullname" . }}-config-secret"
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
data:
{{- range $key, $value := .Values.alerting }}
{{- if (hasKey $value "secretFile") }}
{{- $key | nindent 2 }}:
{{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}}
{{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}}
{{- end }}
{{- end }}
stringData:
{{- range $key, $value := .Values.datasources }}
{{- if (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{ if (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "grafana.fullname" . }}-config-dashboards
namespace: {{ include "grafana.namespace" . }}
data:
{{- include "grafana.configDashboardProviderData" . | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if .Values.createConfigmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
data:
{{- include "grafana.configData" . | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if .Values.dashboards }}
{{ $files := .Files }}
{{- range $provider, $dashboards := .Values.dashboards }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }}
namespace: {{ include "grafana.namespace" $ }}
labels:
{{- include "grafana.labels" $ | nindent 4 }}
dashboard-provider: {{ $provider }}
{{- if $.Values.sidecar.dashboards.enabled }}
{{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }}
{{- end }}
{{- if $dashboards }}
data:
{{- $dashboardFound := false }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
{{- $dashboardFound = true }}
{{- print $key | nindent 2 }}.json:
{{- if hasKey $value "json" }}
|-
{{- $value.json | nindent 6 }}
{{- end }}
{{- if hasKey $value "file" }}
{{- toYaml ( $files.Get $value.file ) | nindent 4}}
{{- end }}
{{- end }}
{{- end }}
{{- if not $dashboardFound }}
{}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,53 @@
{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }}
replicas: {{ .Values.replicas }}
{{- end }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- with .Values.deploymentStrategy }}
strategy:
{{- toYaml . | trim | nindent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include "grafana.configData" . | sha256sum }}
{{- if .Values.dashboards }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
{{- end }}
checksum/sc-dashboard-provider-config: {{ include "grafana.configDashboardProviderData" . | sha256sum }}
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
checksum/secret: {{ include "grafana.secretsData" . | sha256sum }}
{{- end }}
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ tpl (toYaml .Values.envRenderSecret) . | sha256sum }}
{{- end }}
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,4 @@
{{ range .Values.extraObjects }}
---
{{ tpl (toYaml .) $ }}
{{ end }}

View File

@ -0,0 +1,22 @@
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "grafana.fullname" . }}-headless
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
clusterIP: None
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}
type: ClusterIP
ports:
- name: {{ .Values.gossipPortName }}-tcp
port: 9094
{{- end }}

View File

@ -0,0 +1,52 @@
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if .Values.autoscaling.enabled }}
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
app.kubernetes.io/name: {{ include "grafana.name" . }}
helm.sh/chart: {{ include "grafana.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
scaleTargetRef:
apiVersion: apps/v1
{{- if has .Values.persistence.type $sts }}
kind: StatefulSet
{{- else }}
kind: Deployment
{{- end }}
name: {{ include "grafana.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.behavior }}
behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,131 @@
{{ if .Values.imageRenderer.enabled }}
{{- $root := . -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- with .Values.imageRenderer.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.imageRenderer.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }}
replicas: {{ .Values.imageRenderer.replicas }}
{{- end }}
revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- with .Values.imageRenderer.deploymentStrategy }}
strategy:
{{- toYaml . | trim | nindent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }}
{{- with .Values.imageRenderer.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.imageRenderer.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imageRenderer.schedulerName }}
schedulerName: "{{ . }}"
{{- end }}
{{- with .Values.imageRenderer.serviceAccountName }}
serviceAccountName: "{{ . }}"
{{- end }}
{{- with .Values.imageRenderer.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imageRenderer.hostAliases }}
hostAliases:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imageRenderer.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.imageRenderer.image.pullSecrets }}
imagePullSecrets:
{{- range . }}
- name: {{ tpl . $root }}
{{- end}}
{{- end }}
containers:
- name: {{ .Chart.Name }}-image-renderer
          {{- $registry := include "system_default_registry" . | default .Values.imageRenderer.image.registry -}}
{{- if .Values.imageRenderer.image.sha }}
image: "{{ $registry }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}"
{{- else }}
image: "{{ $registry }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }}
{{- if .Values.imageRenderer.command }}
command:
{{- range .Values.imageRenderer.command }}
- {{ . }}
{{- end }}
{{- end}}
ports:
- name: {{ .Values.imageRenderer.service.portName }}
containerPort: {{ .Values.imageRenderer.service.targetPort }}
protocol: TCP
livenessProbe:
httpGet:
path: /
port: {{ .Values.imageRenderer.service.portName }}
env:
- name: HTTP_PORT
value: {{ .Values.imageRenderer.service.targetPort | quote }}
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
- name: ENABLE_METRICS
value: "true"
{{- end }}
{{- range $key, $value := .Values.imageRenderer.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{- tpl (toYaml $value) $ | nindent 16 }}
{{- end }}
{{- range $key, $value := .Values.imageRenderer.env }}
- name: {{ $key | quote }}
value: {{ $value | quote }}
{{- end }}
{{- with .Values.imageRenderer.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /tmp
name: image-renderer-tmpfs
{{- with .Values.imageRenderer.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.imageRenderer.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imageRenderer.affinity }}
affinity:
{{- tpl (toYaml .) $root | nindent 8 }}
{{- end }}
{{- with .Values.imageRenderer.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: image-renderer-tmpfs
emptyDir: {}
{{- end }}

View File

@ -0,0 +1,47 @@
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }}
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
namespace: {{ include "grafana.namespace" . }}
labels:
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
helm.sh/chart: {{ include "grafana.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "grafana.fullname" . }}-image-renderer
minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }}
maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }}
metrics:
{{- if .Values.imageRenderer.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- if .Values.imageRenderer.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.imageRenderer.autoscaling.behavior }}
behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,79 @@
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer-ingress
namespace: {{ include "grafana.namespace" . }}
annotations:
comment: Limit image-renderer ingress traffic from grafana
spec:
podSelector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- with .Values.imageRenderer.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Ingress
ingress:
- ports:
- port: {{ .Values.imageRenderer.service.targetPort }}
protocol: TCP
from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ include "grafana.namespace" . }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 14 }}
{{- end }}
{{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}}
{{ toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer-egress
namespace: {{ include "grafana.namespace" . }}
annotations:
comment: Limit image-renderer egress traffic to grafana
spec:
podSelector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
{{- with .Values.imageRenderer.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Egress
egress:
# allow dns resolution
- ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
# talk only to grafana
- ports:
- port: {{ .Values.service.targetPort }}
protocol: TCP
to:
- namespaceSelector:
matchLabels:
              kubernetes.io/metadata.name: {{ include "grafana.namespace" . }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 14 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,31 @@
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- with .Values.imageRenderer.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.imageRenderer.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
{{- with .Values.imageRenderer.service.clusterIP }}
clusterIP: {{ . }}
{{- end }}
ports:
- name: {{ .Values.imageRenderer.service.portName }}
port: {{ .Values.imageRenderer.service.port }}
protocol: TCP
targetPort: {{ .Values.imageRenderer.service.targetPort }}
{{- with .Values.imageRenderer.appProtocol }}
appProtocol: {{ . }}
{{- end }}
selector:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,48 @@
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
{{- if .Values.imageRenderer.serviceMonitor.namespace }}
namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- with .Values.imageRenderer.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: {{ .Values.imageRenderer.service.portName }}
{{- with .Values.imageRenderer.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.imageRenderer.serviceMonitor.path }}
scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }}
{{- with .Values.imageRenderer.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.imageRenderer.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
jobLabel: "{{ .Release.Name }}-image-renderer"
selector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ include "grafana.namespace" . }}
{{- with .Values.imageRenderer.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,78 @@
{{- if .Values.ingress.enabled -}}
{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "grafana.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $ingressPathType := .Values.ingress.pathType -}}
{{- $extraPaths := .Values.ingress.extraPaths -}}
apiVersion: {{ include "grafana.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.ingress.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ tpl $value $ | quote }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- with .Values.ingress.tls }}
tls:
{{- tpl (toYaml .) $ | nindent 4 }}
{{- end }}
rules:
{{- if .Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ tpl . $ | quote }}
http:
paths:
{{- with $extraPaths }}
{{- toYaml . | nindent 10 }}
{{- end }}
- path: {{ $ingressPath }}
{{- if $ingressSupportsPathType }}
pathType: {{ $ingressPathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- else }}
- http:
paths:
- backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- with $ingressPath }}
path: {{ . }}
{{- end }}
{{- if $ingressSupportsPathType }}
pathType: {{ $ingressPathType }}
{{- end }}
{{- end -}}
{{- end }}

View File

@ -0,0 +1,61 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
policyTypes:
{{- if .Values.networkPolicy.ingress }}
- Ingress
{{- end }}
{{- if .Values.networkPolicy.egress.enabled }}
- Egress
{{- end }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- if .Values.networkPolicy.egress.enabled }}
egress:
{{- if not .Values.networkPolicy.egress.blockDNSResolution }}
- ports:
- port: 53
protocol: UDP
{{- end }}
- ports:
{{ .Values.networkPolicy.egress.ports | toJson }}
{{- with .Values.networkPolicy.egress.to }}
to:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.networkPolicy.ingress }}
ingress:
- ports:
- port: {{ .Values.service.targetPort }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ include "grafana.fullname" . }}-client: "true"
{{- with .Values.networkPolicy.explicitNamespacesSelector }}
- namespaceSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
- podSelector:
matchLabels:
{{- include "grafana.labels" . | nindent 14 }}
role: read
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,94 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-nginx-proxy-config
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
nginx.conf: |-
worker_processes auto;
error_log /dev/stdout warn;
pid /var/cache/nginx/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)';
proxy_connect_timeout 10;
proxy_read_timeout 180;
proxy_send_timeout 5;
proxy_buffering off;
proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 8080;
access_log off;
gzip on;
gzip_min_length 1k;
gzip_comp_level 2;
gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png;
gzip_vary on;
gzip_disable "MSIE [1-6]\.";
proxy_set_header Host $host;
location /api/dashboards {
proxy_pass http://localhost:3000;
}
location /api/search {
proxy_pass http://localhost:3000;
sub_filter_types application/json;
sub_filter_once off;
}
location /api/live/ {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $http_host;
proxy_pass http://localhost:3000;
}
location / {
proxy_cache my_zone;
proxy_cache_valid 200 302 1d;
proxy_cache_valid 301 30d;
proxy_cache_valid any 5m;
proxy_cache_bypass $http_cache_control;
add_header X-Proxy-Cache $upstream_cache_status;
add_header Cache-Control "public";
proxy_pass http://localhost:3000/;
sub_filter_once off;
{{- if eq .Values.global.cattle.clusterId "local" -}}
sub_filter '"appSubUrl":""' '"appSubUrl":"/api/v1/namespaces/{{ template "grafana.namespace" . }}/services/http:{{ template "grafana.fullname" . }}:{{ .Values.service.port }}/proxy"';
{{- else -}}
sub_filter '"appSubUrl":""' '"appSubUrl":"/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ template "grafana.namespace" . }}/services/http:{{ template "grafana.fullname" . }}:{{ .Values.service.port }}/proxy"';
{{- end -}}
sub_filter ':"/avatar/' ':"avatar/';
if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
expires 90d;
}
rewrite ^/k8s/clusters/.*/proxy(.*) /$1 break;
}
}
}

View File

@ -0,0 +1,22 @@
{{- if .Values.podDisruptionBudget }}
apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ . }}
{{- end }}
{{- with .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ . }}
{{- end }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,45 @@
{{- if and (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "grafana.fullname" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.rbac.pspAnnotations }}
annotations: {{ toYaml .Values.rbac.pspAnnotations | nindent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, with DAC_OVERRIDE and CHOWN
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'csi'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,41 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.persistence.extraPvcLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.persistence.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.persistence.finalizers }}
finalizers:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
accessModes:
{{- $_ := required "Must provide at least one access mode for persistent volumes used by Grafana" .Values.persistence.accessModes }}
{{- $_ := required "Must provide at least one access mode for persistent volumes used by Grafana" (first .Values.persistence.accessModes) }}
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if (lookup "v1" "PersistentVolumeClaim" (include "grafana.namespace" .) (include "grafana.fullname" .)) }}
volumeName: {{ (lookup "v1" "PersistentVolumeClaim" (include "grafana.namespace" .) (include "grafana.fullname" .)).spec.volumeName }}
{{- end }}
{{- with .Values.persistence.storageClassName }}
storageClassName: {{ . }}
{{- end }}
{{- with .Values.persistence.selectorLabels }}
selector:
matchLabels:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,32 @@
{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if or (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }}
rules:
{{- if and (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ include "grafana.fullname" . }}]
{{- end }}
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end }}
{{- with .Values.rbac.extraRoleRules }}
{{- toYaml . | nindent 2 }}
{{- end}}
{{- else }}
rules: []
{{- end }}
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
{{- if .Values.rbac.useExistingRole }}
name: {{ .Values.rbac.useExistingRole }}
{{- else }}
name: {{ include "grafana.fullname" . }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "grafana.serviceAccountName" . }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,14 @@
{{- if .Values.envRenderSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "grafana.fullname" . }}-env
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $key, $val := .Values.envRenderSecret }}
{{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- include "grafana.secretsData" . | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,61 @@
{{- if .Values.service.enabled }}
{{- $root := . }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{- tpl (toYaml . | nindent 4) $root }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- with .Values.service.clusterIP }}
clusterIP: {{ . }}
{{- end }}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: LoadBalancer
{{- with .Values.service.loadBalancerIP }}
loadBalancerIP: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerClass }}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- with .Values.service.externalIPs }}
externalIPs:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ . }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
protocol: TCP
targetPort: {{ .Values.service.targetPort }}
{{- with .Values.service.appProtocol }}
appProtocol: {{ . }}
{{- end }}
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- with .Values.extraExposePorts }}
{{- tpl (toYaml . | nindent 4) $root }}
{{- end }}
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.serviceAccount.autoMount | default .Values.serviceAccount.automountServiceAccountToken }}
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- tpl (toYaml . | nindent 4) $ }}
{{- end }}
name: {{ include "grafana.serviceAccountName" . }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,68 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "grafana.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- tpl (toYaml . | nindent 4) $ }}
{{- end }}
spec:
endpoints:
- port: {{ .Values.service.portName }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
metricRelabelings:
{{- if .Values.serviceMonitor.metricRelabelings }}
{{- toYaml .Values.serviceMonitor.metricRelabelings | nindent 6 }}
{{- end }}
{{ if .Values.global.cattle.clusterId }}
- sourceLabels: [__address__]
targetLabel: cluster_id
replacement: {{ .Values.global.cattle.clusterId }}
{{- end }}
{{ if .Values.global.cattle.clusterName }}
- sourceLabels: [__address__]
targetLabel: cluster_name
replacement: {{ .Values.global.cattle.clusterName }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ include "grafana.namespace" . }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,58 @@
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "grafana.fullname" . }}
namespace: {{ include "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
serviceName: {{ include "grafana.fullname" . }}-headless
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- if .Values.persistence.enabled}}
volumeClaimTemplates:
- metadata:
name: storage
spec:
{{- $_ := required "Must provide at least one access mode for persistent volumes used by Grafana" .Values.persistence.accessModes }}
{{- $_ := required "Must provide at least one access mode for persistent volumes used by Grafana" (first .Values.persistence.accessModes) }}
accessModes: {{ .Values.persistence.accessModes }}
storageClassName: {{ .Values.persistence.storageClassName }}
resources:
requests:
storage: {{ required "Must provide size for persistent volumes used by Grafana" .Values.persistence.size }}
{{- with .Values.persistence.selectorLabels }}
selector:
matchLabels:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "grafana.fullname" . }}-test
namespace: {{ include "grafana.namespace" . }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
run.sh: |-
@test "Test Health" {
url="http://{{ include "grafana.fullname" . }}/api/health"
code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}')
[ "$code" == "200" ]
}
{{- end }}

View File

@ -0,0 +1,32 @@
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "grafana.fullname" . }}-test
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
labels:
{{- include "grafana.labels" . | nindent 4 }}
spec:
allowPrivilegeEscalation: true
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
fsGroup:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
runAsUser:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- projected
- csi
- secret
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "grafana.fullname" . }}-test
namespace: {{ include "grafana.namespace" . }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
labels:
{{- include "grafana.labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ include "grafana.fullname" . }}-test]
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled (or .Values.global.cattle.psp.enabled .Values.rbac.pspEnabled) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "grafana.fullname" . }}-test
namespace: {{ include "grafana.namespace" . }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
labels:
{{- include "grafana.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "grafana.fullname" . }}-test
subjects:
- kind: ServiceAccount
name: {{ include "grafana.serviceAccountNameTest" . }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,12 @@
{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
name: {{ include "grafana.serviceAccountNameTest" . }}
namespace: {{ include "grafana.namespace" . }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
{{- end }}

View File

@ -0,0 +1,53 @@
{{- if .Values.testFramework.enabled }}
{{- $root := . }}
apiVersion: v1
kind: Pod
metadata:
name: {{ include "grafana.fullname" . }}-test
labels:
{{- include "grafana.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
namespace: {{ include "grafana.namespace" . }}
spec:
serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }}
{{- with .Values.testFramework.securityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- tpl (toYaml .) $root | nindent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
containers:
- name: {{ .Release.Name }}-test
image: "{{ template "system_default_registry" . | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}"
imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}"
command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
{{- with .Values.testFramework.resources }}
resources:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: tests
configMap:
name: {{ include "grafana.fullname" . }}-test
restartPolicy: Never
{{- end }}

File diff suppressed because it is too large

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-pushprox
apiVersion: v1
appVersion: 0.1.0
description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx
clients.
kubeVersion: '>=1.26.0-0'
name: hardenedKubelet
type: application
version: 0.2.0

View File

@ -0,0 +1,90 @@
# rancher-pushprox
A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster.
Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy.
An instance of this chart is suitable for the following scenarios:
- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster)
- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics)
- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath`
- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make requests to `/metrics`)
- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`)
The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project.
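For example, a minimal values override for the `kube-etcd` scenario above might look like the sketch below (the component name and port are illustrative; the field names come from the configuration tables in this README):
```yaml
# Hypothetical values for scraping etcd metrics on every node
component: kube-etcd # used to name the generated proxy and client resources
metricsPort: 2379    # host port serving the /metrics endpoint
serviceMonitor:
  enabled: true      # deploy a ServiceMonitor CR for Prometheus Operator
clients:
  enabled: true      # DaemonSet of PushProx clients with hostNetwork access
proxy:
  enabled: true      # proxy Deployment that the clients register with
```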
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
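A sketch of the values override used for that in-place upgrade (assuming the chart was previously installed with `global.cattle.psp.enabled` set to `true`):
```yaml
# Disable PSPs before upgrading to Kubernetes v1.25+; on upgrade, the chart
# removes the PSP resources it previously deployed.
global:
  cattle:
    psp:
      enabled: false
```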
## Configuration
The following tables list the configurable parameters of the rancher-pushprox chart and their default values.
### General
#### Required
| Parameter | Description | Example |
| ----- | ----------- | ------ |
| `component` | The component that is being monitored | `kube-etcd` |
| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` |
| `namespaceOverride` | The namespace to install the chart | `""` |
#### Optional
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics, via the proxy, from the hosts that the clients are deployed on. Also deploys a Service that points to all pods with the expected client name and exposes the selected `metricsPort` | `true` |
| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` |
| `service.selector` | The selector that is used to populate the Service's Endpoints object. The chart will error out when rendering templates if `.Values.clients.enabled` is set alongside this field, since this Service is expected to point to the PushProx client DaemonSet / Deployment | `{}` |
| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` |
| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` |
| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` |
| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` |
| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.forceHTTPSScheme` | Forces scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.enabled` | If set to true, the client will use service account credentials mounted at the configured path `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath`. This requires permissions to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath` | This is a volume mount on the pod with permissions to scrape `/metrics` endpoint of Kubernetes components | `"/var/run/secrets/kubernetes.io/serviceaccount/token"` |
| `clients.https.authenticationMethod.bearerTokenSecret.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.authorization.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components | `false` |
| `clients.https.authenticationMethod.authorization.type` | If set, the client will use this type of authorization in its client requests for metrics | `"bearer"` |
| `clients.https.authenticationMethod.authorization.credentials.key` | If set, the client will use this key in the secret created by `clients.https.useServiceAccountCredentials` for authorization in its client requests for metrics | `"token"` |
| `clients.https.authenticationMethod.authorization.credentials.optional` | If set to false, the client will fail if the key in the secret created by `clients.https.useServiceAccountCredentials` does not exist | `false` |
| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` |
| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.seLinuxOptions` | seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host. Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided. | `""` |
| `clients.metrics.enabled` | Whether the client should publish PushProx client-specific metrics. | `false` |
| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` |
| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` |
| `clients.deployment.replicas` | The number of replicas the Deployment has; it should match the number of pods in the hostNetwork Deployment. Required and only used if `clients.deployment.enabled` is set | `0` |
| `clients.deployment.affinity` | The affinity rules that schedule each pod onto the node where the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` |
| `clients.resources` | Set resource limits and requests for the client container | `{}` |
| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` |
| `clients.tolerations` | Specify tolerations for clients | `[]` |
| `proxy.enabled` | Deploys the proxy that each client will register with | `true` |
| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` |
| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` |
| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` |
| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` |
| `kubeVersionOverrides` | A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides. For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches any of the semver constraints provided as keys on the map. On seeing a match, the default value for each values.yaml field overridden will be updated with the new value. If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order. | `[]` |
*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*.
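As an illustration, the following hedged sketch configures HTTPS scraping with certs read from a `hostPath` (the directory and filenames are hypothetical; the wildcards follow the tip above, and the copy-certs init container copies the last match in sorted order):
```yaml
clients:
  https:
    enabled: true
    certDir: /etc/kubernetes/ssl # hostPath on each node holding the TLS files
    certFile: kube-etcd-*.pem    # wildcards allowed; last match in sorted order wins
    keyFile: kube-etcd-*-key.pem
    caCertFile: kube-ca.pem
```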
See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used.

View File

@ -0,0 +1,170 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
# General
{{- define "applyKubeVersionOverrides" -}}
{{- $overrides := dict -}}
{{- range $override := .Values.kubeVersionOverrides -}}
{{- if semverCompare $override.constraint $.Capabilities.KubeVersion.Version -}}
{{- $_ := mergeOverwrite $overrides $override.values -}}
{{- end -}}
{{- end -}}
{{- $_ := mergeOverwrite .Values $overrides -}}
{{- end -}}
{{- define "pushprox.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{- define "pushProxy.commonLabels" -}}
release: {{ .Release.Name }}
component: {{ .Values.component | quote }}
provider: kubernetes
{{- end -}}
{{- define "pushProxy.proxyUrl" -}}
{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}}
{{- if .Values.clients.proxyUrl -}}
{{ printf "%s" .Values.clients.proxyUrl }}
{{- else -}}
{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }}
{{- end -}}{{- end -}}
# Client
{{- define "pushProxy.client.name" -}}
{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.serviceAccountTokenName" -}}
{{- printf "pushprox-%s-client-service-account-token" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.labels" -}}
k8s-app: {{ template "pushProxy.client.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# Proxy
{{- define "pushProxy.proxy.name" -}}
{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.proxy.labels" -}}
k8s-app: {{ template "pushProxy.proxy.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# ServiceMonitor
{{- define "pushprox.serviceMonitor.name" -}}
{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.serviceMonitor.labels" -}}
app: {{ template "pushprox.serviceMonitor.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
{{- define "pushProxy.serviceMonitor.endpoints" -}}
{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}}
{{- $useHTTPS := .Values.clients.https.enabled -}}
{{- $setHTTPSScheme := .Values.clients.https.forceHTTPSScheme -}}
{{- $insecureSkipVerify := .Values.clients.https.insecureSkipVerify -}}
{{- $useServiceAccountCredentials := .Values.clients.https.useServiceAccountCredentials -}}
{{- $serviceAccountTokenName := (include "pushProxy.client.serviceAccountTokenName" . ) -}}
{{- $metricRelabelings := list }}
{{- $endpoints := .Values.serviceMonitor.endpoints }}
{{- if .Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- range $endpoints }}
{{- if $.Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- $clusterIdRelabel := dict }}
{{- $metricRelabelings := list }}
{{- if $.Values.global.cattle.clusterId }}
{{- $_ := set $clusterIdRelabel "action" "replace" }}
{{- $_ := set $clusterIdRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterIdRelabel "targetLabel" "cluster_id" }}
{{- $_ := set $clusterIdRelabel "replacement" $.Values.global.cattle.clusterId }}
{{- $metricRelabelings = append $metricRelabelings $clusterIdRelabel }}
{{- end }}
{{- $clusterNameRelabel := dict }}
{{- if $.Values.global.cattle.clusterName }}
{{- $_ := set $clusterNameRelabel "action" "replace" }}
{{- $_ := set $clusterNameRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterNameRelabel "targetLabel" "cluster_name" }}
{{- $_ := set $clusterNameRelabel "replacement" $.Values.global.cattle.clusterName }}
{{- $metricRelabelings = append $metricRelabelings $clusterNameRelabel }}
{{- end }}
{{- if not (empty $metricRelabelings) }}
{{- $_ := set . "metricRelabelings" ($metricRelabelings)}}
{{- end }}
{{- if $setHTTPSScheme -}}
{{- $_ := set . "scheme" "https" }}
{{- end -}}
{{- if $useHTTPS -}}
{{- if (hasKey . "params") }}
{{- $_ := set (get . "params") "_scheme" (list "https") }}
{{- else }}
{{- $_ := set . "params" (dict "_scheme" (list "https")) }}
{{- end }}
{{- end }}
{{- if (hasKey . "tlsConfig") }}
{{- $_ := set (get . "tlsConfig") "insecureSkipVerify" $insecureSkipVerify }}
{{- else }}
{{- $_ := set . "tlsConfig" (dict "insecureSkipVerify" $insecureSkipVerify) }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenFile.enabled }}
{{- $_ := set . "bearerTokenFile" $.Values.clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenSecret.enabled }}
{{- $_ := set . "bearerTokenSecret" $serviceAccountTokenName }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.authorization.enabled }}
{{- if (hasKey . "authorization") }}
{{- $_ := set (get . "authorization") "type" $.Values.clients.https.authenticationMethod.authorization.type }}
{{- $_ := set (get . "authorization") "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional) }}
{{- else }}
{{- $_ := set . "authorization" (dict "type" $.Values.clients.https.authenticationMethod.authorization.type) }}
{{- $_ := set . "authorization" (dict "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional)) }}
{{- end }}
{{- end }}
{{- end }}
{{- toYaml $endpoints }}
{{- end -}}

View File

@ -0,0 +1,97 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.client.name" . }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- if .Values.clients.rbac.additionalRules }}
{{ toYaml .Values.clients.rbac.additionalRules }}
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
---
{{- if .Values.clients.https.useServiceAccountCredentials }}
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: {{ template "pushProxy.client.serviceAccountTokenName" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
annotations:
kubernetes.io/service-account.name: {{ template "pushProxy.client.name" . }}
{{- end }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 6 }}
{{- end }}
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
- 'emptyDir'
- 'hostPath'
allowedHostPaths:
  - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPS" .Values.clients.https.certDir }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}{{- end }}

View File

@ -0,0 +1,157 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: apps/v1
{{- if .Values.clients.deployment.enabled }}
kind: Deployment
{{- else }}
kind: DaemonSet
{{- end }}
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
pushprox-exporter: "client"
spec:
{{- if .Values.clients.deployment.enabled }}
replicas: {{ .Values.clients.deployment.replicas }}
{{- end }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
spec:
{{- if .Values.clients.affinity }}
affinity: {{ toYaml .Values.clients.affinity | nindent 8 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.clients.nodeSelector }}
{{ toYaml .Values.clients.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.clients.tolerations }}
{{ toYaml .Values.clients.tolerations | indent 8 }}
{{- end }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ template "pushProxy.client.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-client
image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }}
command:
{{- range .Values.clients.command }}
- {{ . | quote }}
{{- end }}
args:
- --fqdn=$(HOST_IP)
- --proxy-url=$(PROXY_URL)
{{- if .Values.clients.metrics.enabled }}
- --metrics-addr=$(PORT)
{{- end }}
- --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}}
{{- if .Values.clients.useLocalhost }}
- --use-localhost
{{- end }}
{{- if .Values.clients.https.enabled }}
{{- if .Values.clients.https.insecureSkipVerify }}
- --insecure-skip-verify
{{- end }}
{{- if .Values.clients.https.useServiceAccountCredentials }}
- --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
{{- end }}
{{- if .Values.clients.https.certDir }}
- --tls.cert=/etc/ssl/push-proxy/push-proxy.pem
- --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem
- --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem
{{- end }}
{{- end }}
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{{- if .Values.clients.metrics.enabled }}
- name: PORT
value: :{{ .Values.clients.port }}
{{- end }}
- name: PROXY_URL
value: {{ template "pushProxy.proxyUrl" . }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumeMounts:
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
{{- end }}
{{- if .Values.clients.resources }}
resources: {{ toYaml .Values.clients.resources | nindent 10 }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
initContainers:
- name: copy-certs
image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }}
command:
- sh
- -c
- |
echo "Searching for files to copy within the source volume"
echo "cert: ${CERT_FILE_NAME}"
echo "key: ${KEY_FILE_NAME}"
echo "cacert: ${CACERT_FILE_NAME}"
CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1)
KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1)
CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1)
test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1
test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1
test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1
echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET"
cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1
chmod 444 $CERT_FILE_TARGET || exit 1
echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET"
cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1
chmod 444 $KEY_FILE_TARGET || exit 1
echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET"
cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1
chmod 444 $CACERT_FILE_TARGET || exit 1
env:
- name: CERT_FILE_NAME
value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }}
- name: KEY_FILE_NAME
value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }}
- name: CACERT_FILE_NAME
value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }}
- name: CERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy.pem
- name: KEY_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-key.pem
- name: CACERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem
securityContext:
runAsNonRoot: false
{{- if and .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 12 }}
{{- end }}
volumeMounts:
- name: metrics-cert-dir-source
mountPath: /etc/source
readOnly: true
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
volumes:
- name: metrics-cert-dir-source
hostPath:
path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }}
- name: metrics-cert-dir
emptyDir: {}
{{- end }}
{{- end }}{{- end }}

View File

@ -0,0 +1,68 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.proxy.name" . }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.proxy.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- end }}{{- end }}
{{- end }}

View File

@ -0,0 +1,57 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
pushprox-exporter: "proxy"
spec:
selector:
matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }}
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.proxy.nodeSelector }}
{{ toYaml .Values.proxy.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.proxy.tolerations }}
{{ toYaml .Values.proxy.tolerations | indent 8 }}
{{- end }}
serviceAccountName: {{ template "pushProxy.proxy.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-proxy
image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}
command:
{{- range .Values.proxy.command }}
- {{ . | quote }}
{{- end }}
{{- if .Values.proxy.resources }}
resources: {{ toYaml .Values.proxy.resources | nindent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
ports:
- name: pp-proxy
port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }}
protocol: TCP
targetPort: {{ .Values.proxy.port }}
selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
{{- end }}{{- end }}

View File

@ -0,0 +1,45 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "pushprox.serviceMonitor.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }}
spec:
endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }}
jobLabel: component
podTargetLabels:
- component
- pushprox-exporter
namespaceSelector:
matchNames:
- {{ template "pushprox.namespace" . }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
---
{{- $selector := "" }}
{{- if not (kindIs "invalid" .Values.service) }}
{{- if not (kindIs "invalid" .Values.service.selector) }}
{{ if .Values.service.selector }}
{{- if .Values.clients.enabled }}
{{- required (printf "Cannot override .Values.service.selector=%s when .Values.clients.enabled=true" (toJson .Values.service.selector)) "" }}
{{- end }}
{{- $selector = (toYaml .Values.service.selector) }}
{{- end }}
{{- end }}
{{- end }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
ports:
- name: metrics
port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}}
protocol: TCP
targetPort: {{ .Values.metricsPort }}
selector: {{ default (include "pushProxy.client.labels" .) $selector | nindent 4 }}
{{- end }}{{- end }}

View File

@ -0,0 +1,14 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "monitoring.coreos.com/v1/ServiceMonitor" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install Prometheus Operator CRDs before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,166 @@
# Default values for rancher-pushprox.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Default image containing both the proxy and the client was generated from the following Dockerfile
# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15
# Configuration
global:
cattle:
psp:
enabled: false
systemDefaultRegistry: ""
seLinux:
enabled: false
# A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides.
#
# For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches
# any of the semver constraints provided as keys on the map.
#
# On seeing a match, the default value for each values.yaml field overridden will be updated with the new value.
#
# If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order.
#
# Notes:
# - On running a helm template, Helm generally assumes the kubeVersion is v1.20.0
# - On running a helm install --dry-run, the correct kubeVersion should be chosen.
kubeVersionOverrides: []
# - constraint: "< 1.21"
# values:
# metricsPort: 10252
# clients:
# https:
# enabled: false
# insecureSkipVerify: false
# useServiceAccountCredentials: false
namespaceOverride: ""
# The component that is being monitored (i.e. etcd)
component: "component"
# The port containing the metrics that need to be scraped
metricsPort: 2739
# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint
serviceMonitor:
enabled: true
# A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec
# Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
# By default, proxyUrl and params._scheme will be overridden based on other values
endpoints:
- port: metrics
# Configure Service that grabs scrape targets
service:
# The selector that is used to populate the Service's Endpoints object.
# The chart will error out on rendering templating if .Values.clients.enabled is set alongside this field,
# since it is expected that this service should point to the PushProx Clients Daemonset / Deployment
selector: {}
clients:
enabled: true
# The port which the PushProx client will post PushProx metrics to
port: 9369
  # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}}
  # Should be modified if the clients are being deployed outside the cluster where the proxy runs; otherwise, leave it empty
proxyUrl: ""
# If set to true, the client will forward any requests from the host IP to 127.0.0.1
# It will only allow proxy requests to the metricsPort specified
useLocalhost: false
# Configuration for accessing metrics via HTTPS
https:
# Does the client require https to access the metrics?
enabled: false
    # Should the client force requests to be sent over https instead of http?
forceHTTPSScheme: false
# If set to true, the client will create a service account with adequate permissions and set a flag
# on the client to use the service account token provided by it to make authorized scrape requests
useServiceAccountCredentials: false
# Configuration for authentication to metrics via https endpoint
authenticationMethod:
# Reads token from defined file in container
# This function is deprecated in the prometheus operator api and may be removed in a future version
bearerTokenFile:
enabled: false
bearerTokenFilePath: "/var/run/secrets/kubernetes.io/serviceaccount/token"
# Reads token from defined secret in namespace
# This function is deprecated in the prometheus operator api and may be removed in a future version
bearerTokenSecret:
enabled: false
# Reads token from defined secret in namespace
authorization:
enabled: false
type: "bearer"
credentials:
key: "token"
optional: false
# If set to true, the client will disable SSL security checks
insecureSkipVerify: false
# Directory on host where necessary TLS cert and key to scrape metrics can be found
certDir: ""
# Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings
certFile: ""
keyFile: ""
caCertFile: ""
# seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host.
# Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided.
seLinuxOptions: {}
metrics:
# Whether the client should publish PushProx client-specific metrics to .Values.clients.port
enabled: false
rbac:
# Additional permissions to provide to the ServiceAccount bound to the client
# This can be used to provide additional permissions for the client to scrape metrics from the k8s API
# Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true
additionalRules: []
# Resource limits
resources: {}
# Options to select all nodes to deploy client DaemonSet on
nodeSelector: {}
tolerations: []
affinity: {}
image:
repository: rancher/pushprox-client
tag: v0.1.3-rancher2-client
command: ["pushprox-client"]
copyCertsImage:
repository: rancher/mirrored-library-busybox
tag: 1.31.1
# The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes.
# This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in
# situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod.
# However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment,
# this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet.
  # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will
  # be responsible for upgrading this chart with the matching number of replicas.
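  # For example, to track a two-replica hostNetwork Deployment, an illustrative (non-default) override is:
  # deployment:
  #   enabled: true
  #   replicas: 2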
deployment:
enabled: false
replicas: 0
proxy:
enabled: true
# The port through which PushProx clients will communicate to the proxy
port: 8080
# Resource limits
resources: {}
# Options to select a node to run a single proxy deployment on
nodeSelector: {}
tolerations: []
image:
repository: rancher/pushprox-proxy
tag: v0.1.3-rancher2-proxy
command: ["pushprox-proxy"]

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-pushprox
apiVersion: v1
appVersion: 0.1.0
description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx
clients.
kubeVersion: '>=1.26.0-0'
name: hardenedNodeExporter
type: application
version: 0.2.0

View File

@ -0,0 +1,90 @@
# rancher-pushprox
A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster.
Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy.
Using an instance of this chart is suitable for the following scenarios:
- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster)
- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics)
- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath`
- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make requests to `/metrics`)
- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`)
The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project.
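For instance, a minimal `values.yaml` for scraping `etcd` in a hardened cluster might look like the sketch below; the component name, port, and certificate filenames are illustrative and should be adjusted to match your cluster:
```yaml
component: kube-etcd
metricsPort: 2379
clients:
  https:
    enabled: true
    certDir: /etc/kubernetes/ssl
    certFile: kube-etcd-*.pem
    keyFile: kube-etcd-*-key.pem
    caCertFile: kube-ca.pem
```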
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
## Configuration
The following tables list the configurable parameters of the rancher-pushprox chart and their default values.
### General
#### Required
| Parameter | Description | Example |
| ----- | ----------- | ------ |
| `component` | The component that is being monitored | `kube-etcd` |
| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` |
| `namespaceOverride` | The namespace to install the chart | `""` |
#### Optional
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` |
| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` |
| `service.selector` | The selector that is used to populate the Service's Endpoints object. The chart will error out on rendering templating if `.Values.clients.enabled` is set alongside this field, since it is expected that this service should point to the PushProx Clients Daemonset / Deployment | `{}` |
| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` |
| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` |
| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` |
| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` |
| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.forceHTTPSScheme` | Forces scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.enabled` | If set to true, the client will use service account credentials mounted at the configured path `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath`. This requires permissions to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath` | This is a volume mount on the pod with permissions to scrape `/metrics` endpoint of Kubernetes components | `"/var/run/secrets/kubernetes.io/serviceaccount/token"` |
| `clients.https.authenticationMethod.bearerTokenSecret.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.authorization.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components | `false` |
| `clients.https.authenticationMethod.authorization.type` | If set, the client will use this type of authorization in its client requests for metrics | `"bearer"` |
| `clients.https.authenticationMethod.authorization.credentials.key` | If set, the client will use this key in the secret created by `clients.https.useServiceAccountCredentials` for authorization in its client requests for metrics | `"token"` |
| `clients.https.authenticationMethod.authorization.credentials.optional` | If set to false, the client will fail if the key in the secret created by `clients.https.useServiceAccountCredentials` does not exist | `false` |
| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` |
| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.seLinuxOptions` | seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host. Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided. | `{}` |
| `clients.metrics.enabled` | Whether the client should publish PushProx client-specific metrics. | `false` |
| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` |
| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` |
| `clients.deployment.replicas` | The number of pods the Deployment has; it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` |
| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` |
| `clients.resources` | Set resource limits and requests for the client container | `{}` |
| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` |
| `clients.tolerations` | Specify tolerations for clients | `[]` |
| `proxy.enabled` | Deploys the proxy that each client will register with | `true` |
| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` |
| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` |
| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` |
| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` |
| `kubeVersionOverrides` | A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides. For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches any of the semver constraints provided as keys on the map. On seeing a match, the default value for each values.yaml field overridden will be updated with the new value. If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order. | `[]` |
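For example, the commented defaults in `values.yaml` show an override that lowers `metricsPort` on clusters older than v1.21:
```yaml
kubeVersionOverrides:
  - constraint: "< 1.21"
    values:
      metricsPort: 10252
```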
*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*.
See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used.

View File

@ -0,0 +1,170 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
# General
{{- define "applyKubeVersionOverrides" -}}
{{- $overrides := dict -}}
{{- range $override := .Values.kubeVersionOverrides -}}
{{- if semverCompare $override.constraint $.Capabilities.KubeVersion.Version -}}
{{- $_ := mergeOverwrite $overrides $override.values -}}
{{- end -}}
{{- end -}}
{{- $_ := mergeOverwrite .Values $overrides -}}
{{- end -}}
{{- define "pushprox.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{- define "pushProxy.commonLabels" -}}
release: {{ .Release.Name }}
component: {{ .Values.component | quote }}
provider: kubernetes
{{- end -}}
{{- define "pushProxy.proxyUrl" -}}
{{- $_ := (required "Template requires either .Values.proxy.port or .Values.clients.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}}
{{- if .Values.clients.proxyUrl -}}
{{ printf "%s" .Values.clients.proxyUrl }}
{{- else -}}
{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }}
{{- end -}}{{- end -}}
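{{/*
Illustrative only: with component "kube-etcd", release namespace "cattle-monitoring-system", and proxy.port 8080,
pushProxy.proxyUrl renders http://pushprox-kube-etcd-proxy.cattle-monitoring-system.svc:8080 unless
.Values.clients.proxyUrl is set, in which case that value is used verbatim.
*/}}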
# Client
{{- define "pushProxy.client.name" -}}
{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.serviceAccountTokenName" -}}
{{- printf "pushprox-%s-client-service-account-token" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.labels" -}}
k8s-app: {{ template "pushProxy.client.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# Proxy
{{- define "pushProxy.proxy.name" -}}
{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.proxy.labels" -}}
k8s-app: {{ template "pushProxy.proxy.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# ServiceMonitor
{{- define "pushprox.serviceMonitor.name" -}}
{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.serviceMonitor.labels" -}}
app: {{ template "pushprox.serviceMonitor.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
{{- define "pushProxy.serviceMonitor.endpoints" -}}
{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}}
{{- $useHTTPS := .Values.clients.https.enabled -}}
{{- $setHTTPSScheme := .Values.clients.https.forceHTTPSScheme -}}
{{- $insecureSkipVerify := .Values.clients.https.insecureSkipVerify -}}
{{- $useServiceAccountCredentials := .Values.clients.https.useServiceAccountCredentials -}}
{{- $serviceAccountTokenName := (include "pushProxy.client.serviceAccountTokenName" . ) -}}
{{- $metricRelabelings := list }}
{{- $endpoints := .Values.serviceMonitor.endpoints }}
{{- if .Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- range $endpoints }}
{{- if $.Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- $clusterIdRelabel := dict }}
{{- $metricRelabelings := list }}
{{- if $.Values.global.cattle.clusterId }}
{{- $_ := set $clusterIdRelabel "action" "replace" }}
{{- $_ := set $clusterIdRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterIdRelabel "targetLabel" "cluster_id" }}
{{- $_ := set $clusterIdRelabel "replacement" $.Values.global.cattle.clusterId }}
{{- $metricRelabelings = append $metricRelabelings $clusterIdRelabel }}
{{- end }}
{{- $clusterNameRelabel := dict }}
{{- if $.Values.global.cattle.clusterName }}
{{- $_ := set $clusterNameRelabel "action" "replace" }}
{{- $_ := set $clusterNameRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterNameRelabel "targetLabel" "cluster_name" }}
{{- $_ := set $clusterNameRelabel "replacement" $.Values.global.cattle.clusterName }}
{{- $metricRelabelings = append $metricRelabelings $clusterNameRelabel }}
{{- end }}
{{- if not (empty $metricRelabelings) }}
{{- $_ := set . "metricRelabelings" ($metricRelabelings)}}
{{- end }}
{{- if $setHTTPSScheme -}}
{{- $_ := set . "scheme" "https" }}
{{- end -}}
{{- if $useHTTPS -}}
{{- if (hasKey . "params") }}
{{- $_ := set (get . "params") "_scheme" (list "https") }}
{{- else }}
{{- $_ := set . "params" (dict "_scheme" (list "https")) }}
{{- end }}
{{- end }}
{{- if (hasKey . "tlsConfig") }}
{{- $_ := set (get . "tlsConfig") "insecureSkipVerify" $insecureSkipVerify }}
{{- else }}
{{- $_ := set . "tlsConfig" (dict "insecureSkipVerify" $insecureSkipVerify) }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenFile.enabled }}
{{- $_ := set . "bearerTokenFile" $.Values.clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenSecret.enabled }}
{{- $_ := set . "bearerTokenSecret" $serviceAccountTokenName }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.authorization.enabled }}
{{- if (hasKey . "authorization") }}
{{- $_ := set (get . "authorization") "type" $.Values.clients.https.authenticationMethod.authorization.type }}
{{- $_ := set (get . "authorization") "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional) }}
{{- else }}
{{- $_ := set . "authorization" (dict "type" $.Values.clients.https.authenticationMethod.authorization.type) }}
{{- $_ := set . "authorization" (dict "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional)) }}
{{- end }}
{{- end }}
{{- end }}
{{- toYaml $endpoints }}
{{- end -}}
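{{/*
Sketch of the effect (not exhaustive): with a cluster name set and HTTPS enabled, each endpoint above is
rendered with proxyUrl, params._scheme=[https], tlsConfig.insecureSkipVerify, and a metricRelabeling that
stamps cluster_name onto scraped series; the configured authenticationMethod adds the matching
bearerTokenFile, bearerTokenSecret, or authorization stanza.
*/}}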

View File

@ -0,0 +1,97 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.client.name" . }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- if .Values.clients.rbac.additionalRules }}
{{ toYaml .Values.clients.rbac.additionalRules }}
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
---
{{- if .Values.clients.https.useServiceAccountCredentials }}
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: {{ template "pushProxy.client.serviceAccountTokenName" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
annotations:
kubernetes.io/service-account.name: {{ template "pushProxy.client.name" . }}
{{- end }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 6 }}
{{- end }}
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
- 'emptyDir'
- 'hostPath'
allowedHostPaths:
  - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPS" .Values.clients.https.certDir }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}{{- end }}

View File

@ -0,0 +1,157 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: apps/v1
{{- if .Values.clients.deployment.enabled }}
kind: Deployment
{{- else }}
kind: DaemonSet
{{- end }}
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
pushprox-exporter: "client"
spec:
{{- if .Values.clients.deployment.enabled }}
replicas: {{ .Values.clients.deployment.replicas }}
{{- end }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
spec:
{{- if .Values.clients.affinity }}
affinity: {{ toYaml .Values.clients.affinity | nindent 8 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.clients.nodeSelector }}
{{ toYaml .Values.clients.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.clients.tolerations }}
{{ toYaml .Values.clients.tolerations | indent 8 }}
{{- end }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ template "pushProxy.client.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-client
image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }}
command:
{{- range .Values.clients.command }}
- {{ . | quote }}
{{- end }}
args:
- --fqdn=$(HOST_IP)
- --proxy-url=$(PROXY_URL)
{{- if .Values.clients.metrics.enabled }}
- --metrics-addr=$(PORT)
{{- end }}
- --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}}
{{- if .Values.clients.useLocalhost }}
- --use-localhost
{{- end }}
{{- if .Values.clients.https.enabled }}
{{- if .Values.clients.https.insecureSkipVerify }}
- --insecure-skip-verify
{{- end }}
{{- if .Values.clients.https.useServiceAccountCredentials }}
- --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
{{- end }}
{{- if .Values.clients.https.certDir }}
- --tls.cert=/etc/ssl/push-proxy/push-proxy.pem
- --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem
- --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem
{{- end }}
{{- end }}
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{{- if .Values.clients.metrics.enabled }}
- name: PORT
value: :{{ .Values.clients.port }}
{{- end }}
- name: PROXY_URL
value: {{ template "pushProxy.proxyUrl" . }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumeMounts:
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
{{- end }}
{{- if .Values.clients.resources }}
resources: {{ toYaml .Values.clients.resources | nindent 10 }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
initContainers:
- name: copy-certs
image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }}
command:
- sh
- -c
- |
echo "Searching for files to copy within the source volume"
echo "cert: ${CERT_FILE_NAME}"
echo "key: ${KEY_FILE_NAME}"
echo "cacert: ${CACERT_FILE_NAME}"
CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1)
KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1)
CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1)
          test -z "${CERT_FILE_SOURCE}" && echo "Failed to find cert file" && exit 1
          test -z "${KEY_FILE_SOURCE}" && echo "Failed to find key file" && exit 1
          test -z "${CACERT_FILE_SOURCE}" && echo "Failed to find cacert file" && exit 1
echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET"
cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1
chmod 444 $CERT_FILE_TARGET || exit 1
echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET"
cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1
chmod 444 $KEY_FILE_TARGET || exit 1
echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET"
cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1
chmod 444 $CACERT_FILE_TARGET || exit 1
env:
        - name: CERT_FILE_NAME
          value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPS" .Values.clients.https.certFile }}
        - name: KEY_FILE_NAME
          value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPS" .Values.clients.https.keyFile }}
        - name: CACERT_FILE_NAME
          value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPS" .Values.clients.https.caCertFile }}
- name: CERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy.pem
- name: KEY_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-key.pem
- name: CACERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem
securityContext:
runAsNonRoot: false
{{- if and .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 12 }}
{{- end }}
volumeMounts:
- name: metrics-cert-dir-source
mountPath: /etc/source
readOnly: true
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
volumes:
- name: metrics-cert-dir-source
hostPath:
        path: {{ required "Need access to volume on host with the SSL cert files to use HTTPS" .Values.clients.https.certDir }}
- name: metrics-cert-dir
emptyDir: {}
{{- end }}
{{- end }}{{- end }}

View File

@ -0,0 +1,68 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.proxy.name" . }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.proxy.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- end }}{{- end }}
{{- end }}

View File

@ -0,0 +1,57 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
pushprox-exporter: "proxy"
spec:
selector:
matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }}
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.proxy.nodeSelector }}
{{ toYaml .Values.proxy.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.proxy.tolerations }}
{{ toYaml .Values.proxy.tolerations | indent 8 }}
{{- end }}
serviceAccountName: {{ template "pushProxy.proxy.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-proxy
image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}
command:
{{- range .Values.proxy.command }}
- {{ . | quote }}
{{- end }}
{{- if .Values.proxy.resources }}
resources: {{ toYaml .Values.proxy.resources | nindent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
ports:
- name: pp-proxy
port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }}
protocol: TCP
targetPort: {{ .Values.proxy.port }}
selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
{{- end }}{{- end }}

View File

@ -0,0 +1,45 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "pushprox.serviceMonitor.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }}
spec:
  endpoints: {{ include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }}
jobLabel: component
podTargetLabels:
- component
- pushprox-exporter
namespaceSelector:
matchNames:
- {{ template "pushprox.namespace" . }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
---
{{- $selector := "" }}
{{- if not (kindIs "invalid" .Values.service) }}
{{- if not (kindIs "invalid" .Values.service.selector) }}
{{ if .Values.service.selector }}
{{- if .Values.clients.enabled }}
{{- required (printf "Cannot override .Values.service.selector=%s when .Values.clients.enabled=true" (toJson .Values.service.selector)) "" }}
{{- end }}
{{- $selector = (toYaml .Values.service.selector) }}
{{- end }}
{{- end }}
{{- end }}
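{{/*
When .Values.service.selector is left empty (the default), the Service below falls back to selecting
the PushProx client pods via the "pushProxy.client.labels" helper.
*/}}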
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
ports:
- name: metrics
port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}}
protocol: TCP
targetPort: {{ .Values.metricsPort }}
selector: {{ default (include "pushProxy.client.labels" .) $selector | nindent 4 }}
{{- end }}{{- end }}

View File

@ -0,0 +1,14 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "monitoring.coreos.com/v1/ServiceMonitor" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install Prometheus Operator CRDs before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,166 @@
# Default values for rancher-pushprox.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Default image containing both the proxy and the client was generated from the following Dockerfile
# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15
# Configuration
global:
cattle:
psp:
enabled: false
systemDefaultRegistry: ""
seLinux:
enabled: false
# A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides.
#
# For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches
# any of the semver constraints provided as keys on the map.
#
# On seeing a match, the default value for each values.yaml field overridden will be updated with the new value.
#
# If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order.
#
# Notes:
# - On running a helm template, Helm generally assumes the kubeVersion is v1.20.0
# - On running a helm install --dry-run, the correct kubeVersion should be chosen.
kubeVersionOverrides: []
# - constraint: "< 1.21"
# values:
# metricsPort: 10252
# clients:
# https:
# enabled: false
# insecureSkipVerify: false
# useServiceAccountCredentials: false
namespaceOverride: ""
# The component that is being monitored (e.g. etcd)
component: "component"
# The port containing the metrics that need to be scraped
metricsPort: 2739
# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint
serviceMonitor:
enabled: true
# A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec
# Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
# By default, proxyUrl and params._scheme will be overridden based on other values
endpoints:
- port: metrics
# Configure Service that grabs scrape targets
service:
# The selector that is used to populate the Service's Endpoints object.
# The chart will error out on rendering templating if .Values.clients.enabled is set alongside this field,
# since it is expected that this service should point to the PushProx Clients Daemonset / Deployment
selector: {}
clients:
enabled: true
  # The port to which the PushProx client will post PushProx metrics
port: 9369
  # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}}
  # Should be modified if the clients are being deployed outside the cluster where the proxy resides; otherwise, leave it null
proxyUrl: ""
# If set to true, the client will forward any requests from the host IP to 127.0.0.1
# It will only allow proxy requests to the metricsPort specified
useLocalhost: false
# Configuration for accessing metrics via HTTPS
https:
# Does the client require https to access the metrics?
enabled: false
    # Should the client force requests to be sent over https instead of http?
forceHTTPSScheme: false
# If set to true, the client will create a service account with adequate permissions and set a flag
# on the client to use the service account token provided by it to make authorized scrape requests
useServiceAccountCredentials: false
# Configuration for authentication to metrics via https endpoint
authenticationMethod:
# Reads token from defined file in container
# This function is deprecated in the prometheus operator api and may be removed in a future version
bearerTokenFile:
enabled: false
bearerTokenFilePath: "/var/run/secrets/kubernetes.io/serviceaccount/token"
# Reads token from defined secret in namespace
# This function is deprecated in the prometheus operator api and may be removed in a future version
bearerTokenSecret:
enabled: false
# Reads token from defined secret in namespace
authorization:
enabled: false
type: "bearer"
credentials:
key: "token"
optional: false
# If set to true, the client will disable SSL security checks
insecureSkipVerify: false
# Directory on host where necessary TLS cert and key to scrape metrics can be found
certDir: ""
# Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings
certFile: ""
keyFile: ""
caCertFile: ""
# seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host.
# Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided.
seLinuxOptions: {}
metrics:
# Whether the client should publish PushProx client-specific metrics to .Values.clients.port
enabled: false
rbac:
# Additional permissions to provide to the ServiceAccount bound to the client
# This can be used to provide additional permissions for the client to scrape metrics from the k8s API
# Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true
additionalRules: []
# Resource limits
resources: {}
# Options to select all nodes to deploy client DaemonSet on
nodeSelector: {}
tolerations: []
affinity: {}
image:
repository: rancher/pushprox-client
tag: v0.1.3-rancher2-client
command: ["pushprox-client"]
copyCertsImage:
repository: rancher/mirrored-library-busybox
tag: 1.31.1
# The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes.
# This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in
# situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod.
# However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment,
# this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet.
  # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will
  # be responsible for upgrading this chart with the matching number of replicas.
deployment:
enabled: false
replicas: 0
proxy:
enabled: true
# The port through which PushProx clients will communicate to the proxy
port: 8080
# Resource limits
resources: {}
# Options to select a node to run a single proxy deployment on
nodeSelector: {}
tolerations: []
image:
repository: rancher/pushprox-proxy
tag: v0.1.3-rancher2-proxy
command: ["pushprox-proxy"]

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-pushprox
apiVersion: v1
appVersion: 0.1.0
description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx
clients.
kubeVersion: '>=1.26.0-0'
name: k3sServer
type: application
version: 0.2.0

View File

@ -0,0 +1,90 @@
# rancher-pushprox
A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster.
Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy.
Using an instance of this chart is suitable for the following scenarios:
- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster)
- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics)
- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath`
- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make requests to `/metrics`)
- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`)
The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project.
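If you only need the Service / ServiceMonitor pair pointed at pods you manage yourself, a hedged sketch is to disable the clients and supply your own selector (the label below is illustrative):
```yaml
clients:
  enabled: false
service:
  selector:
    k8s-app: my-hostnetwork-pods
```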
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
## Configuration
The following tables list the configurable parameters of the rancher-pushprox chart and their default values.
### General
#### Required
| Parameter | Description | Example |
| ----- | ----------- | ------ |
| `component` | The component that is being monitored | `kube-etcd` |
| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` |
| `namespaceOverride` | The namespace to install the chart | `""` |
#### Optional
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` |
| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` |
| `service.selector` | The selector that is used to populate the Service's Endpoints object. The chart will error out on rendering templating if `.Values.clients.enabled` is set alongside this field, since it is expected that this service should point to the PushProx Clients Daemonset / Deployment | `{}` |
| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` |
| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` |
| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` |
| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` |
| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.forceHTTPSScheme` | Forces scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.enabled` | If set to true, the client will use service account credentials mounted at the configured path `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath`. This requires permissions to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath` | This is a volume mount on the pod with permissions to scrape `/metrics` endpoint of Kubernetes components | `"/var/run/secrets/kubernetes.io/serviceaccount/token"` |
| `clients.https.authenticationMethod.bearerTokenSecret.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components. This method is deprecated by the prometheus operator and may be removed in a future release | `false` |
| `clients.https.authenticationMethod.authorization.enabled` | If set to true, the client will use service account credentials to scrape `/metrics` endpoint of Kubernetes components | `false` |
| `clients.https.authenticationMethod.authorization.type` | If set, the client will use this type of authorization in its client requests for metrics | `"bearer"` |
| `clients.https.authenticationMethod.authorization.credentials.key` | If set, the client will use this key in the secret created by `clients.https.useServiceAccountCredentials` for authorization in its client requests for metrics | `"token"` |
| `clients.https.authenticationMethod.authorization.credentials.optional` | If set to false, the client will fail if the key in the secret created by `clients.https.useServiceAccountCredentials` does not exist | `false` |
| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` |
| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.seLinuxOptions` | seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host. Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided. | `{}` |
| `clients.metrics.enabled` | Whether the client should publish PushProx client-specific metrics. | `false` |
| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` |
| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` |
| `clients.deployment.replicas` | The number of pods the Deployment has; it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` |
| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` |
| `clients.resources` | Set resource limits and requests for the client container | `{}` |
| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` |
| `clients.tolerations` | Specify tolerations for clients | `[]` |
| `proxy.enabled` | Deploys the proxy that each client will register with | `true` |
| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` |
| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` |
| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` |
| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` |
| `kubeVersionOverrides` | A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides. For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches any of the semver constraints provided as keys on the map. On seeing a match, the default value for each values.yaml field overridden will be updated with the new value. If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order. | `[]` |
*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*.
See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used.
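As a further illustrative sketch (not a shipped default), scraping a component that requires service account authorization could combine the HTTPS and credential flags described above:
```yaml
component: kube-scheduler
metricsPort: 10259
clients:
  https:
    enabled: true
    useServiceAccountCredentials: true
    insecureSkipVerify: true
    authenticationMethod:
      authorization:
        enabled: true
```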

View File

@ -0,0 +1,170 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
# General
{{- define "applyKubeVersionOverrides" -}}
{{- $overrides := dict -}}
{{- range $override := .Values.kubeVersionOverrides -}}
{{- if semverCompare $override.constraint $.Capabilities.KubeVersion.Version -}}
{{- $_ := mergeOverwrite $overrides $override.values -}}
{{- end -}}
{{- end -}}
{{- $_ := mergeOverwrite .Values $overrides -}}
{{- end -}}
{{- define "pushprox.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{- define "pushProxy.commonLabels" -}}
release: {{ .Release.Name }}
component: {{ .Values.component | quote }}
provider: kubernetes
{{- end -}}
{{- define "pushProxy.proxyUrl" -}}
{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}}
{{- if .Values.clients.proxyUrl -}}
{{ printf "%s" .Values.clients.proxyUrl }}
{{- else -}}
{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }}
{{- end -}}{{- end -}}
# Client
{{- define "pushProxy.client.name" -}}
{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.serviceAccountTokenName" -}}
{{- printf "pushprox-%s-client-service-account-token" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.labels" -}}
k8s-app: {{ template "pushProxy.client.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# Proxy
{{- define "pushProxy.proxy.name" -}}
{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.proxy.labels" -}}
k8s-app: {{ template "pushProxy.proxy.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# ServiceMonitor
{{- define "pushprox.serviceMonitor.name" -}}
{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.serviceMonitor.labels" -}}
app: {{ template "pushprox.serviceMonitor.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
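{{/*
pushProxy.serviceMonitor.endpoints renders .Values.serviceMonitor.endpoints, mutating each
endpoint in place first: it sets proxyUrl, cluster_id/cluster_name metric relabelings, the
https scheme and _scheme param, tlsConfig, and the configured authentication method.
*/}}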
{{- define "pushProxy.serviceMonitor.endpoints" -}}
{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}}
{{- $useHTTPS := .Values.clients.https.enabled -}}
{{- $setHTTPSScheme := .Values.clients.https.forceHTTPSScheme -}}
{{- $insecureSkipVerify := .Values.clients.https.insecureSkipVerify -}}
{{- $useServiceAccountCredentials := .Values.clients.https.useServiceAccountCredentials -}}
{{- $serviceAccountTokenName := (include "pushProxy.client.serviceAccountTokenName" . ) -}}
{{- $endpoints := .Values.serviceMonitor.endpoints }}
{{- if .Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- range $endpoints }}
{{- if $.Values.proxy.enabled }}
{{- $_ := set . "proxyUrl" $proxyURL }}
{{- end }}
{{- $clusterIdRelabel := dict }}
{{- $metricRelabelings := list }}
{{- if $.Values.global.cattle.clusterId }}
{{- $_ := set $clusterIdRelabel "action" "replace" }}
{{- $_ := set $clusterIdRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterIdRelabel "targetLabel" "cluster_id" }}
{{- $_ := set $clusterIdRelabel "replacement" $.Values.global.cattle.clusterId }}
{{- $metricRelabelings = append $metricRelabelings $clusterIdRelabel }}
{{- end }}
{{- $clusterNameRelabel := dict }}
{{- if $.Values.global.cattle.clusterName }}
{{- $_ := set $clusterNameRelabel "action" "replace" }}
{{- $_ := set $clusterNameRelabel "sourceLabels" (list "__address__") }}
{{- $_ := set $clusterNameRelabel "targetLabel" "cluster_name" }}
{{- $_ := set $clusterNameRelabel "replacement" $.Values.global.cattle.clusterName }}
{{- $metricRelabelings = append $metricRelabelings $clusterNameRelabel }}
{{- end }}
{{- if not (empty $metricRelabelings) }}
{{- $_ := set . "metricRelabelings" ($metricRelabelings)}}
{{- end }}
{{- if $setHTTPSScheme -}}
{{- $_ := set . "scheme" "https" }}
{{- end -}}
{{- if $useHTTPS -}}
{{- if (hasKey . "params") }}
{{- $_ := set (get . "params") "_scheme" (list "https") }}
{{- else }}
{{- $_ := set . "params" (dict "_scheme" (list "https")) }}
{{- end }}
{{- end }}
{{- if (hasKey . "tlsConfig") }}
{{- $_ := set (get . "tlsConfig") "insecureSkipVerify" $insecureSkipVerify }}
{{- else }}
{{- $_ := set . "tlsConfig" (dict "insecureSkipVerify" $insecureSkipVerify) }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenFile.enabled }}
{{- $_ := set . "bearerTokenFile" $.Values.clients.https.authenticationMethod.bearerTokenFile.bearerTokenFilePath }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.bearerTokenSecret.enabled }}
{{- $_ := set . "bearerTokenSecret" $serviceAccountTokenName }}
{{- end }}
{{- if $.Values.clients.https.authenticationMethod.authorization.enabled }}
{{- if (hasKey . "authorization") }}
{{- $_ := set (get . "authorization") "type" $.Values.clients.https.authenticationMethod.authorization.type }}
{{- $_ := set (get . "authorization") "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional) }}
{{- else }}
{{- $_ := set . "authorization" (dict "type" $.Values.clients.https.authenticationMethod.authorization.type) }}
{{- $_ := set . "authorization" (dict "credentials" (dict "name" $serviceAccountTokenName "key" $.Values.clients.https.authenticationMethod.authorization.credentials.key "optional" $.Values.clients.https.authenticationMethod.authorization.credentials.optional)) }}
{{- end }}
{{- end }}
{{- end }}
{{- toYaml $endpoints }}
{{- end -}}


@ -0,0 +1,97 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.client.name" . }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- if .Values.clients.rbac.additionalRules }}
{{ toYaml .Values.clients.rbac.additionalRules }}
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
---
{{- if .Values.clients.https.useServiceAccountCredentials }}
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: {{ template "pushProxy.client.serviceAccountTokenName" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
annotations:
kubernetes.io/service-account.name: {{ template "pushProxy.client.name" . }}
{{- end }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 6 }}
{{- end }}
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
- 'emptyDir'
- 'hostPath'
allowedHostPaths:
- pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}{{- end }}


@ -0,0 +1,157 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: apps/v1
{{- if .Values.clients.deployment.enabled }}
kind: Deployment
{{- else }}
kind: DaemonSet
{{- end }}
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
pushprox-exporter: "client"
spec:
{{- if .Values.clients.deployment.enabled }}
replicas: {{ .Values.clients.deployment.replicas }}
{{- end }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
spec:
{{- if .Values.clients.affinity }}
affinity: {{ toYaml .Values.clients.affinity | nindent 8 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.clients.nodeSelector }}
{{ toYaml .Values.clients.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.clients.tolerations }}
{{ toYaml .Values.clients.tolerations | indent 8 }}
{{- end }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ template "pushProxy.client.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-client
image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }}
command:
{{- range .Values.clients.command }}
- {{ . | quote }}
{{- end }}
args:
- --fqdn=$(HOST_IP)
- --proxy-url=$(PROXY_URL)
{{- if .Values.clients.metrics.enabled }}
- --metrics-addr=$(PORT)
{{- end }}
- --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}}
{{- if .Values.clients.useLocalhost }}
- --use-localhost
{{- end }}
{{- if .Values.clients.https.enabled }}
{{- if .Values.clients.https.insecureSkipVerify }}
- --insecure-skip-verify
{{- end }}
{{- if .Values.clients.https.useServiceAccountCredentials }}
- --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
{{- end }}
{{- if .Values.clients.https.certDir }}
- --tls.cert=/etc/ssl/push-proxy/push-proxy.pem
- --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem
- --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem
{{- end }}
{{- end }}
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{{- if .Values.clients.metrics.enabled }}
- name: PORT
value: :{{ .Values.clients.port }}
{{- end }}
- name: PROXY_URL
value: {{ template "pushProxy.proxyUrl" . }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumeMounts:
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
{{- end }}
{{- if .Values.clients.resources }}
resources: {{ toYaml .Values.clients.resources | nindent 10 }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
initContainers:
- name: copy-certs
image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }}
command:
- sh
- -c
- |
echo "Searching for files to copy within the source volume"
echo "cert: ${CERT_FILE_NAME}"
echo "key: ${KEY_FILE_NAME}"
echo "cacert: ${CACERT_FILE_NAME}"
CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1)
KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1)
CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1)
test -z "${CERT_FILE_SOURCE}" && echo "Failed to find cert file" && exit 1
test -z "${KEY_FILE_SOURCE}" && echo "Failed to find key file" && exit 1
test -z "${CACERT_FILE_SOURCE}" && echo "Failed to find cacert file" && exit 1
echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET"
cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1
chmod 444 $CERT_FILE_TARGET || exit 1
echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET"
cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1
chmod 444 $KEY_FILE_TARGET || exit 1
echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET"
cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1
chmod 444 $CACERT_FILE_TARGET || exit 1
env:
- name: CERT_FILE_NAME
value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }}
- name: KEY_FILE_NAME
value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }}
- name: CACERT_FILE_NAME
value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }}
- name: CERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy.pem
- name: KEY_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-key.pem
- name: CACERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem
securityContext:
runAsNonRoot: false
{{- if and .Values.global.seLinux.enabled .Values.clients.https.seLinuxOptions }}
seLinuxOptions: {{ .Values.clients.https.seLinuxOptions | toYaml | nindent 12 }}
{{- end }}
volumeMounts:
- name: metrics-cert-dir-source
mountPath: /etc/source
readOnly: true
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
volumes:
- name: metrics-cert-dir-source
hostPath:
path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }}
- name: metrics-cert-dir
emptyDir: {}
{{- end }}
{{- end }}{{- end }}


@ -0,0 +1,68 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
rules:
{{- if .Values.global.cattle.psp.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.proxy.name" . }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.proxy.name" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.proxy.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ include "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- end }}{{- end }}
{{- end }}


@ -0,0 +1,57 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
pushprox-exporter: "proxy"
spec:
selector:
matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }}
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.proxy.nodeSelector }}
{{ toYaml .Values.proxy.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.proxy.tolerations }}
{{ toYaml .Values.proxy.tolerations | indent 8 }}
{{- end }}
serviceAccountName: {{ template "pushProxy.proxy.name" . }}
{{- if .Values.global.imagePullSecretName }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecretName }}
{{- end }}
containers:
- name: pushprox-proxy
image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}
command:
{{- range .Values.proxy.command }}
- {{ . | quote }}
{{- end }}
{{- if .Values.proxy.resources }}
resources: {{ toYaml .Values.proxy.resources | nindent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.proxy.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
spec:
ports:
- name: pp-proxy
port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }}
protocol: TCP
targetPort: {{ .Values.proxy.port }}
selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
{{- end }}{{- end }}


@ -0,0 +1,45 @@
{{- template "applyKubeVersionOverrides" . -}}
{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "pushprox.serviceMonitor.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }}
spec:
endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }}
jobLabel: component
podTargetLabels:
- component
- pushprox-exporter
namespaceSelector:
matchNames:
- {{ template "pushprox.namespace" . }}
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
---
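{{- /*
.Values.service.selector may only override this Service's selector when the chart's own
clients are disabled; otherwise rendering fails via the required check below.
*/ -}}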
{{- $selector := "" }}
{{- if not (kindIs "invalid" .Values.service) }}
{{- if not (kindIs "invalid" .Values.service.selector) }}
{{ if .Values.service.selector }}
{{- if .Values.clients.enabled }}
{{- required (printf "Cannot override .Values.service.selector=%s when .Values.clients.enabled=true" (toJson .Values.service.selector)) "" }}
{{- end }}
{{- $selector = (toYaml .Values.service.selector) }}
{{- end }}
{{- end }}
{{- end }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
ports:
- name: metrics
port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}}
protocol: TCP
targetPort: {{ .Values.metricsPort }}
selector: {{ default (include "pushProxy.client.labels" .) $selector | nindent 4 }}
{{- end }}{{- end }}


@ -0,0 +1,14 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "monitoring.coreos.com/v1/ServiceMonitor" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install Prometheus Operator CRDs before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}


@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}


@ -0,0 +1,166 @@
# Default values for rancher-pushprox.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Default image containing both the proxy and the client was generated from the following Dockerfile
# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15
# Configuration
global:
cattle:
psp:
enabled: false
systemDefaultRegistry: ""
seLinux:
enabled: false
# A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides.
#
# For each key in kubeVersionOverrides, this chart will check to see if the current Kubernetes cluster's version matches
# any of the semver constraints provided as keys on the map.
#
# On seeing a match, the default value for each values.yaml field overridden will be updated with the new value.
#
# If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order.
#
# Notes:
# - On running a helm template, Helm generally assumes the kubeVersion is v1.20.0
# - On running a helm install --dry-run, the correct kubeVersion should be chosen.
kubeVersionOverrides: []
# - constraint: "< 1.21"
# values:
# metricsPort: 10252
# clients:
# https:
# enabled: false
# insecureSkipVerify: false
# useServiceAccountCredentials: false
namespaceOverride: ""
# The component that is being monitored (i.e. etcd)
component: "component"
# The port containing the metrics that need to be scraped
metricsPort: 2739
# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint
serviceMonitor:
enabled: true
# A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec
# Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
# By default, proxyUrl and params._scheme will be overridden based on other values
endpoints:
- port: metrics
# Configure Service that grabs scrape targets
service:
# The selector that is used to populate the Service's Endpoints object.
# The chart will fail to render if .Values.clients.enabled is set alongside this field,
# since this Service is expected to point to the PushProx client DaemonSet / Deployment (a commented sketch follows below).
selector: {}
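# A hypothetical sketch (labels assumed; only valid when clients.enabled is false):
# selector:
#   app: my-external-scrape-target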
clients:
enabled: true
# The port which the PushProx client will post PushProx metrics to
port: 9369
# If unset, this defaults to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc:{{proxy.port}}
# Should be modified if the clients are deployed outside the cluster where the proxy resides; otherwise, leave it empty
proxyUrl: ""
# If set to true, the client will forward any requests from the host IP to 127.0.0.1
# It will only allow proxy requests to the metricsPort specified
useLocalhost: false
# Configuration for accessing metrics via HTTPS
https:
# Does the client require https to access the metrics?
enabled: false
# Should the ServiceMonitor endpoint scheme be forced to https?
forceHTTPSScheme: false
# If set to true, the client will create a service account with adequate permissions and set a flag
# on the client to use the service account token provided by it to make authorized scrape requests
useServiceAccountCredentials: false
# Configuration for authentication to metrics via https endpoint
authenticationMethod:
# Reads token from defined file in container
# This field is deprecated in the Prometheus Operator API and may be removed in a future version
bearerTokenFile:
enabled: false
bearerTokenFilePath: "/var/run/secrets/kubernetes.io/serviceaccount/token"
# Reads token from defined secret in namespace
# This field is deprecated in the Prometheus Operator API and may be removed in a future version
bearerTokenSecret:
enabled: false
# Reads token from defined secret in namespace
authorization:
enabled: false
type: "bearer"
credentials:
key: "token"
optional: false
# If set to true, the client will disable SSL security checks
insecureSkipVerify: false
# Directory on host where necessary TLS cert and key to scrape metrics can be found
certDir: ""
# Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings
certFile: ""
keyFile: ""
caCertFile: ""
# seLinuxOptions to be passed into the container that copies certs. Should define a container with permissions to read the files in the certDir provided on the host.
# Required and only used if `clients.https.enabled` is set and `clients.https.certDir` is provided.
seLinuxOptions: {}
metrics:
# Whether the client should publish PushProx client-specific metrics to .Values.clients.port
enabled: false
rbac:
# Additional permissions to provide to the ServiceAccount bound to the client
# This can be used to provide additional permissions for the client to scrape metrics from the k8s API
# Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true
additionalRules: []
# Resource limits
resources: {}
# Options to select all nodes to deploy client DaemonSet on
nodeSelector: {}
tolerations: []
affinity: {}
image:
repository: rancher/pushprox-client
tag: v0.1.3-rancher2-client
command: ["pushprox-client"]
copyCertsImage:
repository: rancher/mirrored-library-busybox
tag: 1.31.1
# The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes.
# This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in
# situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod.
# However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment,
# this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet.
# If this feature is used and the underlying Deployment's replica count changes, the user is
# responsible for updating this chart's replica count to match (see the commented sketch below).
deployment:
enabled: false
replicas: 0
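# A hypothetical sketch (the label and replica count are assumptions, not chart defaults):
# deployment:
#   enabled: true
#   replicas: 3
# affinity:
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchLabels:
#           app: my-hostnetwork-app
#       topologyKey: kubernetes.io/hostname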
proxy:
enabled: true
# The port through which PushProx clients will communicate to the proxy
port: 8080
# Resource limits
resources: {}
# Options to select a node to run a single proxy deployment on
nodeSelector: {}
tolerations: []
image:
repository: rancher/pushprox-proxy
tag: v0.1.3-rancher2-proxy
command: ["pushprox-proxy"]


@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@ -0,0 +1,32 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/os: linux
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-kube-state-metrics
apiVersion: v2
appVersion: 2.10.1
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
- metric
- monitoring
- prometheus
- kubernetes
maintainers:
- email: tariq.ibrahim@mulesoft.com
name: tariq1890
- email: manuel@rueg.eu
name: mrueg
- email: david@0xdc.me
name: dotdc
name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
version: 5.16.4
