mirror of https://git.rancher.io/charts

commit 3cc99421e8 (parent fe9f16cbbe): make charts
Binary file not shown.
@ -0,0 +1,27 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Alerting Drivers
  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux,windows
  catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
  catalog.cattle.io/release-name: rancher-alerting-drivers
  catalog.cattle.io/type: cluster-tool
  catalog.cattle.io/upstream-version: 100.0.1
apiVersion: v2
appVersion: 1.16.0
dependencies:
- condition: prom2teams.enabled
  name: prom2teams
  repository: file://./charts/prom2teams
- condition: sachet.enabled
  name: sachet
  repository: file://./charts/sachet
description: The manager for third-party webhook receivers used in Prometheus Alertmanager
icon: https://charts.rancher.io/assets/logos/alerting-drivers.svg
keywords:
- monitoring
- alertmanager
- webhook
name: rancher-alerting-drivers
version: 103.0.0
@ -0,0 +1,11 @@
# Rancher Alerting Drivers

This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).

These Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.

Currently, this chart supports the following Drivers:
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
- SMS, based on [Sachet](https://github.com/messagebird/sachet)

After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.
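
For example, a minimal install enabling both Drivers might look like the following sketch; the repo alias and release name are illustrative, not prescribed by the chart:

```bash
helm install rancher-alerting-drivers rancher-charts/rancher-alerting-drivers \
  --namespace cattle-monitoring-system \
  --set prom2teams.enabled=true \
  --set sachet.enabled=true
```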
@ -0,0 +1,29 @@
# Rancher Alerting Drivers

This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).

These Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.

Currently, this chart supports the following Drivers:
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
- SMS, based on [Sachet](https://github.com/messagebird/sachet)

After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.

## Upgrading to Kubernetes v1.25+

Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.

As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has previously been set to `true`.
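
For example, an in-place upgrade that disables PSPs might look like the following sketch; the release name, namespace, and repo alias are illustrative:

```bash
helm upgrade rancher-alerting-drivers rancher-charts/rancher-alerting-drivers \
  --namespace cattle-monitoring-system \
  --reuse-values \
  --set global.cattle.psp.enabled=false
```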

> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.

> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.

Upon setting `global.cattle.psp.enabled` to `false`, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.

As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
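
As a rough illustration of the Pod Security Admission model, a Pod Security Standard can be enforced on the release namespace with a namespace label; the namespace and the `baseline` level below are only examples, so check the Rancher docs for the level your workloads actually need:

```bash
kubectl label namespace cattle-monitoring-system \
  pod-security.kubernetes.io/enforce=baseline
```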
@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -0,0 +1,10 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: rancher-prom2teams
apiVersion: v1
appVersion: 4.2.1
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
name: prom2teams
version: 0.2.0
@ -0,0 +1,44 @@
{%- set
  theme_colors = {
    'resolved' : '2DC72D',
    'critical' : '8C1A1A',
    'severe' : '8C1A1A',
    'warning' : 'FF9A0B',
    'unknown' : 'CCCCCC'
  }
-%}

{
  "@type": "MessageCard",
  "@context": "http://schema.org/extensions",
  "themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}",
  "summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}",
  "title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}",
  "sections": [{
    "activityTitle": "{{ msg_text.summary }}",
    "facts": [{% if msg_text.name %}{
      "name": "Alert",
      "value": "{{ msg_text.name }}"
    },{% endif %}{% if msg_text.instance %}{
      "name": "In host",
      "value": "{{ msg_text.instance }}"
    },{% endif %}{% if msg_text.severity %}{
      "name": "Severity",
      "value": "{{ msg_text.severity }}"
    },{% endif %}{% if msg_text.description %}{
      "name": "Description",
      "value": "{{ msg_text.description }}"
    },{% endif %}{
      "name": "Status",
      "value": "{{ msg_text.status }}"
    }{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{
      "name": "{{ key }}",
      "value": "{{ msg_text.extra_labels[key] }}"
    }{% endfor %}{% endif %}
    {% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{
      "name": "{{ key }}",
      "value": "{{ msg_text.extra_annotations[key] }}"
    }{% endfor %}{% endif %}],
    "markdown": true
  }]
}
@ -0,0 +1,2 @@
Prom2Teams has been installed. Check its status by running:
  kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}"
@ -0,0 +1,73 @@
{{/* vim: set filetype=mustache: */}}

{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}

{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}

{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
  value: "linux"
  effect: "NoSchedule"
  operator: "Equal"
{{- end -}}

{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
{{- define "prom2teams.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prom2teams.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prom2teams.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "prom2teams.labels" -}}
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
app.kubernetes.io/instance: {{ .Release.Name }}
release: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
@ -0,0 +1,39 @@
{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}}
{{- if not (has .Values.prom2teams.loglevel $valid) -}}
{{- fail "Invalid log level" }}
{{- end -}}
{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}}
{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector when prom2teams.connector is set" }}
{{- end -}}
{{/* Create the ConfigMap only on helm install, and only when the target ConfigMap does not already exist. */}}
{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "prom2teams.namespace" . }}
  name: {{ include "prom2teams.fullname" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.ini: |-
    [HTTP Server]
    Host: {{ .Values.prom2teams.host }}
    Port: {{ .Values.prom2teams.port }}
    [Microsoft Teams]
    {{- with .Values.prom2teams.connector }}
    Connector: {{ . }}
    {{- end }}
    {{- range $key, $val := .Values.prom2teams.connectors }}
    {{ $key }}: {{ $val }}
    {{- end }}
    [Group Alerts]
    Field: {{ .Values.prom2teams.group_alerts_by }}
    [Log]
    Level: {{ .Values.prom2teams.loglevel }}
    [Template]
    Path: {{ .Values.prom2teams.templatepath }}
  teams.j2: {{ .Files.Get "files/teams.j2" | quote }}
{{- end -}}
@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "prom2teams.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "prom2teams.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      serviceAccountName: {{ include "prom2teams.fullname" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: config
          configMap:
            name: {{ include "prom2teams.fullname" . }}
      containers:
        - name: {{ .Chart.Name }}
          image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8089
              protocol: TCP
          volumeMounts:
            - name: config
              mountPath: /opt/prom2teams/helmconfig/
          env:
            - name: APP_CONFIG_FILE
              value: {{ .Values.prom2teams.config | quote }}
            - name: PROM2TEAMS_PORT
              value: {{ .Values.prom2teams.port | quote }}
            - name: PROM2TEAMS_HOST
              value: {{ .Values.prom2teams.host | quote }}
            - name: PROM2TEAMS_CONNECTOR
              value: {{ .Values.prom2teams.connector | quote }}
            - name: PROM2TEAMS_GROUP_ALERTS_BY
              value: {{ .Values.prom2teams.group_alerts_by | quote }}
            - name: PROM2TEAMS_LOGLEVEL
              value: {{ .Values.prom2teams.loglevel }}
            {{- range $key, $value := .Values.prom2teams.extraEnv }}
            - name: "{{ $key }}"
              value: "{{ $value }}"
            {{- end }}
          resources: {{ toYaml .Values.resources | nindent 12 }}
          {{- if .Values.securityContext.enabled }}
          securityContext:
            privileged: false
            readOnlyRootFilesystem: false
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
          {{- end }}
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
        runAsGroup: {{ .Values.securityContext.runAsGroup }}
        fsGroup: {{ .Values.securityContext.fsGroup }}
      {{- end }}
@ -0,0 +1,61 @@
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp-{{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  volumes:
    - 'configMap'
    - 'secret'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
rules:
- apiGroups:
  - policy
  resourceNames:
  - {{ include "prom2teams.fullname" . }}-psp-{{ include "prom2teams.namespace" . }}
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "prom2teams.fullname" . }}-psp
subjects:
- kind: ServiceAccount
  name: {{ include "prom2teams.fullname" . }}
{{- end }}
@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels:
{{ include "prom2teams.labels" . | indent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: 8089
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "prom2teams.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
@ -0,0 +1,69 @@
# Default values for prom2teams.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

global:
  cattle:
    psp:
      enabled: false
    systemDefaultRegistry: ""
  namespaceOverride: ""

nameOverride: "prom2teams"
fullnameOverride: ""

replicaCount: 1

image:
  repository: rancher/mirrored-idealista-prom2teams
  tag: 4.2.1
  pullPolicy: IfNotPresent

resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 200Mi

service:
  type: ClusterIP
  port: 8089

prom2teams:
  host: 0.0.0.0
  port: 8089
  connector: the-connector-url
  connectors: {}
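  # Illustrative sketch only: each key under `connectors` becomes a named
  # connector in the generated config.ini, mapping a connector name to a Teams
  # webhook URL (the URL below is a placeholder, not a working endpoint).
  # connectors:
  #   alerts-channel: https://example.webhook.office.com/webhookb2/placeholder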
  # group_alerts_by can be one of
  # ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url")
  group_alerts_by:
  # loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL)
  loglevel: INFO
  templatepath: /opt/prom2teams/helmconfig/teams.j2
  config: /opt/prom2teams/helmconfig/config.ini
  extraEnv: {}

# Security Context properties
securityContext:
  # enabled is a flag to enable Security Context
  enabled: true
  # runAsUser is the user ID used to run the container
  runAsUser: 101
  # runAsGroup is the primary group ID used to run all processes within any container of the pod
  runAsGroup: 101
  # fsGroup is the group ID associated with the container
  fsGroup: 101
  # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the container security context
  readOnlyRootFilesystem: true

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []

affinity: {}
@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -0,0 +1,11 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: rancher-sachet
apiVersion: v2
appVersion: 0.3.1
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
name: sachet
type: application
version: 1.0.1
@ -0,0 +1 @@
# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl
@ -0,0 +1,3 @@
rancher-sachet is now installed on the cluster!
Please refer to the upstream documentation for configuration options:
https://github.com/messagebird/sachet
@ -0,0 +1,79 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}

{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}

{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
  value: "linux"
  effect: "NoSchedule"
  operator: "Equal"
{{- end -}}

{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "sachet.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end }}

{{/*
Expand the name of the chart.
*/}}
{{- define "sachet.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sachet.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "sachet.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{ include "sachet.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "sachet.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sachet.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@ -0,0 +1,34 @@
{{/* This file is applied on helm install, and only when the target ConfigMap does not already exist. */}}
{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "sachet.namespace" . }}
  name: {{ include "sachet.fullname" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.yaml: |-
    {{- if and (not .Values.sachet.providers) (not .Values.sachet.receivers) }}
    # please refer to the upstream documentation for configuration options:
    # https://github.com/messagebird/sachet
    #
    # providers:
    #   aliyun:
    #     region_id:
    #     ...
    # receivers:
    #   - name: 'team-sms'
    #     provider: 'aliyun'
    #     ...
    {{- end }}
    {{- with .Values.sachet.providers }}
    providers: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.sachet.receivers }}
    receivers: {{ toYaml . | nindent 6 }}
    {{- end }}
{{- end }}
@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations: {{ toYaml . | nindent 8 }}
      {{- end }}
      labels: {{ include "sachet.selectorLabels" . | nindent 8 }}
    spec:
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "sachet.fullname" . }}
      {{- with .Values.podSecurityContext }}
      securityContext: {{ toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 9876
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /-/live
              port: http
          readinessProbe:
            httpGet:
              path: /-/ready
              port: http
          volumeMounts:
            - mountPath: /etc/sachet/
              name: config-volume
          {{- with .Values.resources }}
          resources: {{ toYaml . | nindent 12 }}
          {{- end }}
        - name: config-reloader
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }}
          imagePullPolicy: {{ .Values.configReloader.pullPolicy }}
          args:
            - -volume-dir=/watch-config
            - -webhook-method=POST
            - -webhook-status-code=200
            - -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
          volumeMounts:
            - mountPath: /watch-config
              name: config-volume
      volumes:
        - name: config-volume
          configMap:
            name: {{ include "sachet.fullname" . }}
            defaultMode: 0777
@ -0,0 +1,61 @@
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "sachet.fullname" . }}-psp-{{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  volumes:
    - 'configMap'
    - 'secret'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
rules:
- apiGroups:
  - policy
  resourceNames:
  - {{ include "sachet.fullname" . }}-psp-{{ include "sachet.namespace" . }}
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "sachet.fullname" . }}-psp
subjects:
- kind: ServiceAccount
  name: {{ include "sachet.fullname" . }}
{{- end }}
@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
      {{- if contains "NodePort" .Values.service.type }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
  selector: {{ include "sachet.selectorLabels" . | nindent 4 }}
@ -0,0 +1,69 @@
# Default values for sachet.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

global:
  cattle:
    psp:
      enabled: false
    systemDefaultRegistry: ""
  namespaceOverride: ""

nameOverride: "sachet"
fullnameOverride: ""

configReloader:
  repository: rancher/mirrored-jimmidyson-configmap-reload
  pullPolicy: IfNotPresent
  tag: v0.8.0

sachet:
  # reference: https://github.com/messagebird/sachet/blob/master/examples/config.yaml
  providers: {}

  receivers: []
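  # Illustrative sketch only, following the shape of the upstream example
  # config; the provider credentials and phone number below are placeholders,
  # not working values.
  # providers:
  #   twilio:
  #     account_sid: 'placeholder-sid'
  #     auth_token: 'placeholder-token'
  # receivers:
  #   - name: 'team-sms'
  #     provider: 'twilio'
  #     to:
  #       - '+15551234567'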

replicaCount: 1

image:
  repository: rancher/mirrored-messagebird-sachet
  pullPolicy: IfNotPresent
  tag: 0.3.1

imagePullSecrets: []

podAnnotations: {}

podSecurityContext: {}

securityContext:
  runAsUser: 1000
  runAsNonRoot: true
  runAsGroup: 1000

service:
  type: ClusterIP
  port: 9876
  nodePort: 30001

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []

affinity: {}
@ -0,0 +1,17 @@
categories:
- monitoring
namespace: cattle-monitoring-system
questions:
- variable: prom2teams.enabled
  label: Enable Microsoft Teams
  type: boolean
  group: "General"
- variable: sachet.enabled
  label: Enable SMS
  type: boolean
  group: "General"
- variable: global.cattle.psp.enabled
  description: "Flag to enable or disable the installation of PodSecurityPolicies by this chart in the target cluster. If the cluster is running Kubernetes 1.25+, you must update this value to false."
  label: "Enable PodSecurityPolicies"
  type: boolean
  group: "Security Settings"
@ -0,0 +1,2 @@
rancher-alerting-drivers is now installed on the cluster!
Please refer to the upstream documentation for each Driver for configuration options.
@ -0,0 +1,117 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}

{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}

{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
  value: "linux"
  effect: "NoSchedule"
  operator: "Equal"
{{- end -}}

{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
{{- define "drivers.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "drivers.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "drivers.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "drivers.labels" -}}
helm.sh/chart: {{ include "drivers.chart" . }}
{{ include "drivers.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "drivers.selectorLabels" -}}
app.kubernetes.io/name: {{ include "drivers.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "drivers.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "drivers.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
https://github.com/helm/helm/issues/4535#issuecomment-477778391
Usage: {{ include "call-nested" (list . "SUBCHART_NAME" "TEMPLATE") }}
e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }}
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}


{{/*
Get the list of configMaps to be managed
*/}}
{{- define "drivers.configmapList" -}}
{{- if .Values.sachet.enabled -}}
- {{ include "call-nested" (list . "sachet" "sachet.fullname") }}
{{- end }}
{{- if .Values.prom2teams.enabled -}}
- {{ include "call-nested" (list . "prom2teams" "prom2teams.fullname") }}
{{- end }}
{{- end }}
@ -0,0 +1,50 @@
{{- if and (not .Values.sachet.enabled) (not .Values.prom2teams.enabled) -}}
{{- fail "At least one Driver must be enabled to install the chart." }}
{{- end -}}

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-admin-{{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-edit-{{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-view-{{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - 'get'
  - 'list'
  - 'watch'
@ -0,0 +1,126 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  namespace: {{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
  backoffLimit: 1
  template:
    spec:
      serviceAccountName: {{ include "drivers.fullname" . }}-patch-sa
      securityContext:
        runAsNonRoot: true
        runAsUser: 1000
      restartPolicy: Never
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      containers:
        - name: {{ include "drivers.fullname" . }}-patch-sa
          image: "{{ include "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
          imagePullPolicy: IfNotPresent
          command: ["kubectl", "-n", {{ .Release.Namespace | quote }}, "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  namespace: {{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "patch"]
{{- if .Values.global.cattle.psp.enabled }}
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    verbs: ["use"]
    resourceNames:
      - {{ include "drivers.fullname" . }}-patch-sa
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "drivers.fullname" . }}-patch-sa
subjects:
  - kind: ServiceAccount
    name: {{ include "drivers.fullname" . }}-patch-sa
    namespace: {{ .Release.Namespace }}
---
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
  privileged: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  volumes:
    - 'secret'
{{- end }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "drivers.fullname" . }}-default-allow-all
  namespace: {{ .Release.Namespace }}
spec:
  podSelector: {}
  ingress:
    - {}
  egress:
    - {}
  policyTypes:
    - Ingress
    - Egress
@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}
@ -0,0 +1,29 @@
# Default values for rancher-alerting-drivers.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

global:
  cattle:
    psp:
      enabled: false
    # the registry where all images will be pulled from
    systemDefaultRegistry: ""
  kubectl:
    repository: rancher/kubectl
    tag: v1.20.2
  # set this value if you want the sub-charts to be installed into
  # a namespace rather than where this chart is installed
  namespaceOverride: ""

prom2teams:
  enabled: false

sachet:
  enabled: true

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
index.yaml (32 changed lines)
@ -5157,6 +5157,38 @@ entries:
    - assets/rancher-aks-operator-crd/rancher-aks-operator-crd-100.0.0+up1.0.1.tgz
    version: 100.0.0+up1.0.1
  rancher-alerting-drivers:
  - annotations:
      catalog.cattle.io/certified: rancher
      catalog.cattle.io/display-name: Alerting Drivers
      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
      catalog.cattle.io/os: linux
      catalog.cattle.io/permits-os: linux,windows
      catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
      catalog.cattle.io/release-name: rancher-alerting-drivers
      catalog.cattle.io/type: cluster-tool
      catalog.cattle.io/upstream-version: 100.0.1
    apiVersion: v2
    appVersion: 1.16.0
    created: "2023-09-04T21:40:21.607057361+05:30"
    dependencies:
    - condition: prom2teams.enabled
      name: prom2teams
      repository: file://./charts/prom2teams
    - condition: sachet.enabled
      name: sachet
      repository: file://./charts/sachet
    description: The manager for third-party webhook receivers used in Prometheus
      Alertmanager
    digest: 392ee8c099e74e9a2b52c42d5f11cbeb158d9f79f99f66350ada13d1ad4b2d98
    icon: https://charts.rancher.io/assets/logos/alerting-drivers.svg
    keywords:
    - monitoring
    - alertmanager
    - webhook
    name: rancher-alerting-drivers
    urls:
    - assets/rancher-alerting-drivers/rancher-alerting-drivers-103.0.0.tgz
    version: 103.0.0
  - annotations:
      catalog.cattle.io/certified: rancher
      catalog.cattle.io/display-name: Alerting Drivers