Make charts

pull/1995/head
Steven Crespo 2022-07-25 11:17:41 -07:00
parent e30ce4449c
commit af3f77b0fc
41 changed files with 1207 additions and 8 deletions

@@ -0,0 +1,27 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Alerting Drivers
  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux,windows
  catalog.cattle.io/rancher-version: '>= 2.6.0-0 < 2.7.0-0'
  catalog.cattle.io/release-name: rancher-alerting-drivers
  catalog.cattle.io/type: cluster-tool
  catalog.cattle.io/upstream-version: 100.0.1
apiVersion: v2
appVersion: 1.16.0
dependencies:
- condition: prom2teams.enabled
  name: prom2teams
  repository: file://./charts/prom2teams
- condition: sachet.enabled
  name: sachet
  repository: file://./charts/sachet
description: The manager for third-party webhook receivers used in Prometheus Alertmanager
icon: https://charts.rancher.io/assets/logos/alerting-drivers.svg
keywords:
- monitoring
- alertmanager
- webhook
name: rancher-alerting-drivers
version: 100.0.3

@@ -0,0 +1,11 @@
# Rancher Alerting Drivers

This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).

These Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.

Currently, this chart supports the following Drivers:
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
- SMS, based on [Sachet](https://github.com/messagebird/sachet)

After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.
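For example, an existing Alertmanager can target an installed Driver with a plain webhook receiver. The snippet below is a minimal sketch, not part of the chart: it assumes the default release name `rancher-alerting-drivers`, the default `cattle-monitoring-system` namespace from `questions.yaml`, and the sub-charts' default ports and connector key.

```yaml
# Alertmanager configuration excerpt (sketch; service names and ports are assumptions)
receivers:
  - name: msteams
    webhook_configs:
      # prom2teams serves one endpoint per connector under /v2/<connector-name>;
      # "Connector" is the default key this chart writes into config.ini
      - url: http://rancher-alerting-drivers-prom2teams.cattle-monitoring-system.svc:8089/v2/Connector
  - name: team-sms   # Sachet matches this receiver name against its own config
    webhook_configs:
      - url: http://rancher-alerting-drivers-sachet.cattle-monitoring-system.svc:9876/alert
```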

@@ -0,0 +1,11 @@
# Rancher Alerting Drivers

This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).

These Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.

Currently, this chart supports the following Drivers:
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
- SMS, based on [Sachet](https://github.com/messagebird/sachet)

After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,10 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: rancher-prom2teams
apiVersion: v1
appVersion: 3.2.3
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
name: prom2teams
version: 0.2.0

@@ -0,0 +1,44 @@
{%- set
  theme_colors = {
    'resolved' : '2DC72D',
    'critical' : '8C1A1A',
    'severe' : '8C1A1A',
    'warning' : 'FF9A0B',
    'unknown' : 'CCCCCC'
  }
-%}
{
  "@type": "MessageCard",
  "@context": "http://schema.org/extensions",
  "themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}",
  "summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}",
  "title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}",
  "sections": [{
    "activityTitle": "{{ msg_text.summary }}",
    "facts": [{% if msg_text.name %}{
      "name": "Alert",
      "value": "{{ msg_text.name }}"
    },{% endif %}{% if msg_text.instance %}{
      "name": "In host",
      "value": "{{ msg_text.instance }}"
    },{% endif %}{% if msg_text.severity %}{
      "name": "Severity",
      "value": "{{ msg_text.severity }}"
    },{% endif %}{% if msg_text.description %}{
      "name": "Description",
      "value": "{{ msg_text.description }}"
    },{% endif %}{
      "name": "Status",
      "value": "{{ msg_text.status }}"
    }{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{
      "name": "{{ key }}",
      "value": "{{ msg_text.extra_labels[key] }}"
    }{% endfor %}{% endif %}
    {% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{
      "name": "{{ key }}",
      "value": "{{ msg_text.extra_annotations[key] }}"
    }{% endfor %}{% endif %}],
    "markdown": true
  }]
}

@@ -0,0 +1,2 @@
Prom2Teams has been installed. Check its status by running:
  kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}"

@@ -0,0 +1,73 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below to workloads so they can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prom2teams.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prom2teams.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prom2teams.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "prom2teams.labels" -}}
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
app.kubernetes.io/instance: {{ .Release.Name }}
release: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

@@ -0,0 +1,39 @@
{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}}
{{- if not (has .Values.prom2teams.loglevel $valid) -}}
{{- fail "Invalid log level"}}
{{- end -}}
{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}}
{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector when prom2teams.connector is set"}}
{{- end -}}
{{/* Create the configmap when the operation is helm install and the target configmap does not exist. */}}
{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "prom2teams.namespace" . }}
  name: {{ include "prom2teams.fullname" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.ini: |-
    [HTTP Server]
    Host: {{ .Values.prom2teams.host }}
    Port: {{ .Values.prom2teams.port }}
    [Microsoft Teams]
    {{- with .Values.prom2teams.connector }}
    Connector: {{ . }}
    {{- end }}
    {{- range $key, $val := .Values.prom2teams.connectors }}
    {{ $key }}: {{ $val }}
    {{- end }}
    [Group Alerts]
    Field: {{ .Values.prom2teams.group_alerts_by }}
    [Log]
    Level: {{ .Values.prom2teams.loglevel }}
    [Template]
    Path: {{ .Values.prom2teams.templatepath }}
  teams.j2: {{ .Files.Get "files/teams.j2" | quote }}
{{- end -}}

@@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "prom2teams.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "prom2teams.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      serviceAccountName: {{ include "prom2teams.fullname" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
      - name: config
        configMap:
          name: {{ include "prom2teams.fullname" . }}
      containers:
      - name: {{ .Chart.Name }}
        image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - name: http
          containerPort: 8089
          protocol: TCP
        volumeMounts:
        - name: config
          mountPath: /opt/prom2teams/helmconfig/
        env:
        - name: APP_CONFIG_FILE
          value: {{ .Values.prom2teams.config | quote }}
        - name: PROM2TEAMS_PORT
          value: {{ .Values.prom2teams.port | quote }}
        - name: PROM2TEAMS_HOST
          value: {{ .Values.prom2teams.host | quote }}
        - name: PROM2TEAMS_CONNECTOR
          value: {{ .Values.prom2teams.connector | quote }}
        - name: PROM2TEAMS_GROUP_ALERTS_BY
          value: {{ .Values.prom2teams.group_alerts_by | quote }}
        resources: {{ toYaml .Values.resources | nindent 12 }}
        {{- if .Values.securityContext.enabled }}
        securityContext:
          privileged: false
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
        {{- end }}
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
        runAsGroup: {{ .Values.securityContext.runAsGroup }}
        fsGroup: {{ .Values.securityContext.fsGroup }}
      {{- end }}

@@ -0,0 +1,29 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
  volumes:
  - 'configMap'
  - 'secret'

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
rules:
- apiGroups:
  - policy
  resourceNames:
  - {{ include "prom2teams.fullname" . }}-psp
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "prom2teams.fullname" . }}-psp
subjects:
- kind: ServiceAccount
  name: {{ include "prom2teams.fullname" . }}

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels:
{{ include "prom2teams.labels" . | indent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
  - port: {{ .Values.service.port }}
    targetPort: 8089
    protocol: TCP
    name: http
  selector:
    app.kubernetes.io/name: {{ include "prom2teams.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

@@ -0,0 +1,66 @@
# Default values for prom2teams.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    systemDefaultRegistry: ""
  namespaceOverride: ""
nameOverride: "prom2teams"
fullnameOverride: ""
replicaCount: 1
image:
  repository: rancher/mirrored-idealista-prom2teams
  tag: 3.2.3
  pullPolicy: IfNotPresent
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 200Mi
service:
  type: ClusterIP
  port: 8089
prom2teams:
  host: 0.0.0.0
  port: 8089
  connector: the-connector-url
  connectors: {}
  # group_alerts_by can be one of
  # ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url")
  group_alerts_by:
  # loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL)
  loglevel: INFO
  templatepath: /opt/prom2teams/helmconfig/teams.j2
  config: /opt/prom2teams/helmconfig/config.ini
# Security Context properties
securityContext:
  # enabled is a flag to enable Security Context
  enabled: true
  # runAsUser is the user ID used to run the container
  runAsUser: 65534
  # runAsGroup is the primary group ID used to run all processes within any container of the pod
  runAsGroup: 65534
  # fsGroup is the group ID associated with the container
  fsGroup: 65534
  # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the container security context
  readOnlyRootFilesystem: true
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
affinity: {}
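As a usage sketch (all values chosen for illustration; the webhook URLs are placeholders), an override enabling a default connector plus one named connector might look like this:

```yaml
# values-override.yaml -- a sketch, not shipped with the chart
prom2teams:
  # default connector, exposed by prom2teams as /v2/Connector
  connector: https://example.webhook.office.com/webhookb2/aaa-bbb   # placeholder URL
  # named connectors, each exposed as /v2/<name>
  connectors:
    dev-team: https://example.webhook.office.com/webhookb2/ccc-ddd  # placeholder URL
  group_alerts_by: severity
  loglevel: DEBUG
```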

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,11 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: rancher-sachet
apiVersion: v2
appVersion: 0.2.6
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
name: sachet
type: application
version: 1.0.1

@@ -0,0 +1 @@
# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl

@@ -0,0 +1,3 @@
rancher-sachet is now installed on the cluster!
Please refer to the upstream documentation for configuration options:
https://github.com/messagebird/sachet

@@ -0,0 +1,79 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below to workloads so they can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "sachet.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end }}
{{/*
Expand the name of the chart.
*/}}
{{- define "sachet.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sachet.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "sachet.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{ include "sachet.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "sachet.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sachet.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -0,0 +1,34 @@
{{/* This file is applied when the operation is helm install and the target configmap does not exist. */}}
{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "sachet.namespace" . }}
  name: {{ include "sachet.fullname" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.yaml: |-
    {{- if and (not .Values.sachet.providers) (not .Values.sachet.receivers) }}
    # please refer to the upstream documentation for configuration options:
    # https://github.com/messagebird/sachet
    #
    # providers:
    #   aliyun:
    #     region_id:
    #     ...
    # receivers:
    #   - name: 'team-sms'
    #     provider: 'aliyun'
    #     ...
    {{- end }}
    {{- with .Values.sachet.providers }}
    providers: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.sachet.receivers }}
    receivers: {{ toYaml . | nindent 6 }}
    {{- end }}
{{- end }}

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations: {{ toYaml . | nindent 8 }}
      {{- end }}
      labels: {{ include "sachet.selectorLabels" . | nindent 8 }}
    spec:
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "sachet.fullname" . }}
      {{- with .Values.podSecurityContext }}
      securityContext: {{ toYaml . | nindent 8 }}
      {{- end }}
      containers:
      - name: {{ .Chart.Name }}
        securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
        image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - name: http
          containerPort: 9876
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /-/live
            port: http
        readinessProbe:
          httpGet:
            path: /-/ready
            port: http
        volumeMounts:
        - mountPath: /etc/sachet/
          name: config-volume
        {{- with .Values.resources }}
        resources: {{ toYaml . | nindent 12 }}
        {{- end }}
      - name: config-reloader
        securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
        image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }}
        imagePullPolicy: {{ .Values.configReloader.pullPolicy }}
        args:
        - -volume-dir=/watch-config
        - -webhook-method=POST
        - -webhook-status-code=200
        - -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
        volumeMounts:
        - mountPath: /watch-config
          name: config-volume
      volumes:
      - name: config-volume
        configMap:
          name: {{ include "sachet.fullname" . }}
          defaultMode: 0777

@@ -0,0 +1,29 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
  volumes:
  - 'configMap'
  - 'secret'

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
rules:
- apiGroups:
  - policy
  resourceNames:
  - {{ include "sachet.fullname" . }}-psp
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "sachet.fullname" . }}-psp
subjects:
- kind: ServiceAccount
  name: {{ include "sachet.fullname" . }}

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
  - port: {{ .Values.service.port }}
    targetPort: http
    protocol: TCP
    name: http
    {{- if contains "NodePort" .Values.service.type }}
    nodePort: {{ .Values.service.nodePort }}
    {{- end }}
  selector: {{ include "sachet.selectorLabels" . | nindent 4 }}

@@ -0,0 +1,67 @@
# Default values for sachet.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    systemDefaultRegistry: ""
  namespaceOverride: ""
nameOverride: "sachet"
fullnameOverride: ""
configReloader:
  repository: rancher/mirrored-jimmidyson-configmap-reload
  pullPolicy: IfNotPresent
  tag: v0.4.0
sachet:
  # reference: https://github.com/messagebird/sachet/blob/master/examples/config.yaml
  providers: {}
  receivers: []
replicaCount: 1
image:
  repository: rancher/mirrored-messagebird-sachet
  pullPolicy: IfNotPresent
  tag: 0.2.6
imagePullSecrets: []
podAnnotations: {}
podSecurityContext: {}
securityContext:
  runAsUser: 1000
  runAsNonRoot: true
  runAsGroup: 1000
service:
  type: ClusterIP
  port: 9876
  nodePort: 30001
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
affinity: {}
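For illustration, a populated override might look like the following; the field names mirror the upstream Sachet examples, and the access key and phone number are placeholders:

```yaml
# values-override.yaml -- a sketch based on the upstream Sachet examples
sachet:
  providers:
    messagebird:
      access_key: 'live_xxxxxxxxxxxxxxxx'   # placeholder credential
  receivers:
    # the name must match the Alertmanager receiver that posts to /alert
    - name: 'team-sms'
      provider: 'messagebird'
      to:
        - '+15550000000'                    # placeholder number
```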

@@ -0,0 +1,14 @@
categories:
- monitoring
namespace: cattle-monitoring-system
questions:
- variable: prom2teams.enabled
  default: false
  label: Enable Microsoft Teams
  type: boolean
  group: "General"
- variable: sachet.enabled
  default: false
  label: Enable SMS
  type: boolean
  group: "General"

@@ -0,0 +1,2 @@
rancher-alerting-drivers is now installed on the cluster!
Please refer to the upstream documentation for each Driver for configuration options.

@@ -0,0 +1,117 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below to workloads so they can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "drivers.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "drivers.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "drivers.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "drivers.labels" -}}
helm.sh/chart: {{ include "drivers.chart" . }}
{{ include "drivers.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "drivers.selectorLabels" -}}
app.kubernetes.io/name: {{ include "drivers.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "drivers.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "drivers.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
https://github.com/helm/helm/issues/4535#issuecomment-477778391
Usage: {{ include "call-nested" (list . "SUBCHART_NAME" "TEMPLATE") }}
e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }}
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}
{{/*
Get the list of configMaps to be managed
*/}}
{{- define "drivers.configmapList" -}}
{{- if .Values.sachet.enabled -}}
- {{ include "call-nested" (list . "sachet" "sachet.fullname") }}
{{- end }}
{{- if .Values.prom2teams.enabled -}}
- {{ include "call-nested" (list . "prom2teams" "prom2teams.fullname") }}
{{- end }}
{{- end }}
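To make the last two helpers concrete: `call-nested` re-evaluates a sub-chart's named template against that sub-chart's scoped values, so with both Drivers enabled under a release named `rancher-alerting-drivers`, `drivers.configmapList` would render roughly the following (names are illustrative and depend on the release name):

```yaml
- rancher-alerting-drivers-sachet
- rancher-alerting-drivers-prom2teams
```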

@@ -0,0 +1,50 @@
{{- if and (not .Values.sachet.enabled) (not .Values.prom2teams.enabled) -}}
{{- fail "At least one Driver must be enabled to install the chart." }}
{{- end -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-admin
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-edit
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-view
  labels: {{ include "drivers.labels" . | nindent 4 }}
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
  verbs:
  - 'get'
  - 'list'
  - 'watch'

@@ -0,0 +1,122 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  namespace: {{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
  backoffLimit: 1
  template:
    spec:
      serviceAccountName: {{ include "drivers.fullname" . }}-patch-sa
      securityContext:
        runAsNonRoot: true
        runAsUser: 1000
      restartPolicy: Never
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      containers:
      - name: {{ include "drivers.fullname" . }}-patch-sa
        image: "{{ include "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
        imagePullPolicy: IfNotPresent
        command: ["kubectl", "-n", {{ .Release.Namespace | quote }}, "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  namespace: {{ .Release.Namespace }}
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
rules:
- apiGroups: [""]
  resources: ["serviceaccounts"]
  verbs: ["get", "patch"]
- apiGroups: ["policy"]
  resources: ["podsecuritypolicies"]
  verbs: ["use"]
  resourceNames:
  - {{ include "drivers.fullname" . }}-patch-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "drivers.fullname" . }}-patch-sa
subjects:
- kind: ServiceAccount
  name: {{ include "drivers.fullname" . }}-patch-sa
  namespace: {{ .Release.Namespace }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "drivers.fullname" . }}-patch-sa
  labels: {{ include "drivers.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
  privileged: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
  volumes:
  - 'secret'
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "drivers.fullname" . }}-default-allow-all
  namespace: {{ .Release.Namespace }}
spec:
  podSelector: {}
  ingress:
  - {}
  egress:
  - {}
  policyTypes:
  - Ingress
  - Egress

@@ -0,0 +1,27 @@
# Default values for rancher-alerting-drivers.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    # the registry where all images will be pulled from
    systemDefaultRegistry: ""
  kubectl:
    repository: rancher/kubectl
    tag: v1.20.2
  # set this value if you want the sub-charts to be installed into
  # a namespace rather than where this chart is installed
  namespaceOverride: ""
prom2teams:
  enabled: false
sachet:
  enabled: false
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
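Because the sub-charts are gated on the `condition` fields in Chart.yaml, enabling a Driver is a one-line override; a sketch:

```yaml
# enable the Teams driver; sachet stays disabled unless toggled the same way
prom2teams:
  enabled: true
```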

@@ -3,7 +3,7 @@ annotations:
   catalog.cattle.io/certified: rancher
   catalog.cattle.io/deploys-on-os: windows
   catalog.cattle.io/display-name: Logging
-  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.24.0-0'
+  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
   catalog.cattle.io/namespace: cattle-logging-system
   catalog.cattle.io/permits-os: linux,windows
   catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1

@@ -9,7 +9,7 @@ annotations:
   catalog.cattle.io/certified: rancher
   catalog.cattle.io/deploys-on-os: windows
   catalog.cattle.io/display-name: Monitoring
-  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.24.0-0'
+  catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
   catalog.cattle.io/namespace: cattle-monitoring-system
   catalog.cattle.io/permits-os: linux,windows
   catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1

@@ -2026,6 +2026,38 @@ entries:
    - assets/rancher-aks-operator-crd/rancher-aks-operator-crd-100.0.0+up1.0.1.tgz
    version: 100.0.0+up1.0.1
  rancher-alerting-drivers:
  - annotations:
      catalog.cattle.io/certified: rancher
      catalog.cattle.io/display-name: Alerting Drivers
      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
      catalog.cattle.io/os: linux
      catalog.cattle.io/permits-os: linux,windows
      catalog.cattle.io/rancher-version: '>= 2.6.0-0 < 2.7.0-0'
      catalog.cattle.io/release-name: rancher-alerting-drivers
      catalog.cattle.io/type: cluster-tool
      catalog.cattle.io/upstream-version: 100.0.1
    apiVersion: v2
    appVersion: 1.16.0
    created: "2022-07-25T11:14:12.736413-07:00"
    dependencies:
    - condition: prom2teams.enabled
      name: prom2teams
      repository: file://./charts/prom2teams
    - condition: sachet.enabled
      name: sachet
      repository: file://./charts/sachet
    description: The manager for third-party webhook receivers used in Prometheus
      Alertmanager
    digest: ae4cacd71c273f207e50d9f7fdd2725d41e14d547cabc23889e87db5072aec6c
    icon: https://charts.rancher.io/assets/logos/alerting-drivers.svg
    keywords:
    - monitoring
    - alertmanager
    - webhook
    name: rancher-alerting-drivers
    urls:
    - assets/rancher-alerting-drivers/rancher-alerting-drivers-100.0.3.tgz
    version: 100.0.3
  - annotations:
      catalog.cattle.io/certified: rancher
      catalog.cattle.io/display-name: Alerting Drivers
@@ -5506,7 +5538,7 @@ entries:
       catalog.cattle.io/certified: rancher
       catalog.cattle.io/deploys-on-os: windows
       catalog.cattle.io/display-name: Logging
-      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.24.0-0'
+      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
       catalog.cattle.io/namespace: cattle-logging-system
       catalog.cattle.io/permits-os: linux,windows
       catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
@@ -5517,10 +5549,10 @@ entries:
       catalog.cattle.io/upstream-version: 3.17.7
     apiVersion: v1
     appVersion: 3.17.7
-    created: "2022-07-12T07:42:47.477584+02:00"
+    created: "2022-07-25T11:14:27.000038-07:00"
     description: Collects and filter logs using highly configurable CRDs. Powered
       by Banzai Cloud Logging Operator.
-    digest: aeb3bbb03c562c7020470cd4aea16dcb8249580467f10fcb56b34effd45aa0d9
+    digest: 068d4ee4376c857b0a0a4fb95978308b3f66aac356a72218f43594ebd6bd2db5
     icon: https://charts.rancher.io/assets/logos/logging.svg
     keywords:
     - logging
@@ -5998,7 +6030,7 @@ entries:
       catalog.cattle.io/certified: rancher
       catalog.cattle.io/deploys-on-os: windows
       catalog.cattle.io/display-name: Monitoring
-      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.24.0-0'
+      catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.25.0-0'
       catalog.cattle.io/namespace: cattle-monitoring-system
       catalog.cattle.io/permits-os: linux,windows
       catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1
@@ -6011,7 +6043,7 @@ entries:
       catalog.cattle.io/upstream-version: 19.0.3
     apiVersion: v2
     appVersion: 0.50.0
-    created: "2022-07-19T15:59:08.388553-07:00"
+    created: "2022-07-25T11:15:12.454389-07:00"
     dependencies:
     - condition: grafana.enabled
       name: grafana
@@ -6082,7 +6114,7 @@ entries:
     description: Collects several related Helm charts, Grafana dashboards, and Prometheus
       rules combined with documentation and scripts to provide easy to operate end-to-end
       Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
-    digest: ad61859573df4ba6f94d16367610b375508b3d70ac18331dfec1e3e777463c6c
+    digest: afcb2bf1dc3874426d473c630cd5c82a017c9eab37c1cc99963e03a66b0a5f10
     home: https://github.com/prometheus-operator/kube-prometheus
     icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
     keywords: