Merge pull request #1130 from jiaqiluo/fix-configmap-missing

fix the issue where the configmap is missing when upgrading rancher-alerting-drivers to enable a new driver
pull/1125/head
actions 2021-04-21 19:31:00 +00:00
parent cbd4e36640
commit 0dab92c00b
63 changed files with 1787 additions and 0 deletions

Binary file not shown.

View File

@ -0,0 +1,21 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Alerting Drivers
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: rancher-alerting-drivers
apiVersion: v2
# NOTE(review): 1.16.0 looks like `helm create` boilerplate rather than a real
# upstream driver version — confirm before release.
appVersion: 1.16.0
dependencies:
  - condition: prom2teams.enabled
    name: prom2teams
    repository: file://./charts/prom2teams
  - condition: sachet.enabled
    name: sachet
    repository: file://./charts/sachet
description: The manager for third-party webhook receivers used in Prometheus Alertmanager
keywords:
  - monitoring
  # fixed typo: was "alertmanger"
  - alertmanager
  - webhook
name: rancher-alerting-drivers
version: 1.0.100-rc03

View File

@ -0,0 +1,11 @@
# Rancher Alerting Drivers
This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).
Those Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.
Currently, this chart supports the following Drivers:
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
- SMS, based on [Sachet](https://github.com/messagebird/sachet)
After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-prom2teams
apiVersion: v1
appVersion: 3.2.1
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
name: prom2teams
version: 0.2.0

View File

@ -0,0 +1,44 @@
{%- set
theme_colors = {
'resolved' : '2DC72D',
'critical' : '8C1A1A',
'severe' : '8C1A1A',
'warning' : 'FF9A0B',
'unknown' : 'CCCCCC'
}
-%}
{
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}",
"summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}",
"title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}",
"sections": [{
"activityTitle": "{{ msg_text.summary }}",
"facts": [{% if msg_text.name %}{
"name": "Alert",
"value": "{{ msg_text.name }}"
},{% endif %}{% if msg_text.instance %}{
"name": "In host",
"value": "{{ msg_text.instance }}"
},{% endif %}{% if msg_text.severity %}{
"name": "Severity",
"value": "{{ msg_text.severity }}"
},{% endif %}{% if msg_text.description %}{
"name": "Description",
"value": "{{ msg_text.description }}"
},{% endif %}{
"name": "Status",
"value": "{{ msg_text.status }}"
}{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{
"name": "{{ key }}",
"value": "{{ msg_text.extra_labels[key] }}"
}{% endfor %}{% endif %}
{% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{
"name": "{{ key }}",
"value": "{{ msg_text.extra_annotations[key] }}"
}{% endfor %}{% endif %}],
"markdown": true
}]
}

View File

@ -0,0 +1,2 @@
Prom2Teams has been installed. Check its status by running:
kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}"

View File

@ -0,0 +1,73 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prom2teams.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prom2teams.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prom2teams.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "prom2teams.labels" -}}
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
app.kubernetes.io/instance: {{ .Release.Name }}
release: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,39 @@
{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}}
{{- if not (has .Values.prom2teams.loglevel $valid) -}}
{{- fail "Invalid log level"}}
{{- end -}}
{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}}
{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector when prom2teams.connector is set"}}
{{- end -}}
{{/* Create the ConfigMap only when one with this name does not already exist
     in the target namespace. The pre-install/pre-upgrade hook together with
     this `lookup` guard and the `keep` resource policy preserves a
     user-modified ConfigMap across upgrades instead of overwriting it.
     NOTE(review): `lookup` returns an empty result during `helm template`
     and dry-runs, so the resource is always rendered in those modes. */}}
{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "prom2teams.namespace" . }}
  name: {{ include "prom2teams.fullname" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.ini: |-
    [HTTP Server]
    Host: {{ .Values.prom2teams.host }}
    Port: {{ .Values.prom2teams.port }}
    [Microsoft Teams]
    {{- with .Values.prom2teams.connector }}
    Connector: {{ . }}
    {{- end }}
    {{- range $key, $val := .Values.prom2teams.connectors }}
    {{ $key }}: {{ $val }}
    {{- end }}
    [Group Alerts]
    Field: {{ .Values.prom2teams.group_alerts_by }}
    [Log]
    Level: {{ .Values.prom2teams.loglevel }}
    [Template]
    Path: {{ .Values.prom2teams.templatepath }}
  teams.j2: {{ .Files.Get "files/teams.j2" | quote }}
{{- end -}}

View File

@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prom2teams.fullname" . }}
namespace: {{ include "prom2teams.namespace" . }}
labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: {{ include "prom2teams.fullname" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ include "prom2teams.fullname" . }}
containers:
- name: {{ .Chart.Name }}
image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8089
protocol: TCP
volumeMounts:
- name: config
mountPath: /opt/prom2teams/helmconfig/
env:
- name: APP_CONFIG_FILE
value: {{ .Values.prom2teams.config | quote }}
- name: PROM2TEAMS_PORT
value: {{ .Values.prom2teams.port | quote }}
- name: PROM2TEAMS_HOST
value: {{ .Values.prom2teams.ip | quote }}
- name: PROM2TEAMS_CONNECTOR
value: {{ .Values.prom2teams.connector | quote }}
- name: PROM2TEAMS_GROUP_ALERTS_BY
value: {{ .Values.prom2teams.group_alerts_by | quote }}
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext.enabled }}
securityContext:
privileged: false
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{- toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{- toYaml .Values.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}
runAsUser: {{ .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
fsGroup: {{ .Values.securityContext.fsGroup }}
{{- end }}

View File

@ -0,0 +1,28 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "prom2teams.fullname" . }}-psp
labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'configMap'

View File

@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "prom2teams.fullname" . }}-psp
namespace: {{ include "prom2teams.namespace" . }}
labels: {{ include "prom2teams.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resourceNames:
- {{ include "prom2teams.fullname" . }}-psp
resources:
- podsecuritypolicies
verbs:
- use

View File

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "prom2teams.fullname" . }}-psp
namespace: {{ include "prom2teams.namespace" . }}
labels: {{ include "prom2teams.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "prom2teams.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ include "prom2teams.fullname" . }}

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prom2teams.fullname" . }}
namespace: {{ include "prom2teams.namespace" . }}
labels: {{ include "prom2teams.labels" . | nindent 4 }}

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  {{- /* consistency fix: use `nindent` on the same line as the key, matching
         every other template in this chart (was `indent` on its own line) */}}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      # must match the containerPort exposed by the prom2teams deployment
      targetPort: 8089
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "prom2teams.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,62 @@
# Default values for prom2teams.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    # the registry prefixed to all image references (see "system_default_registry")
    systemDefaultRegistry: ""
  # install into this namespace instead of the release namespace when set
  namespaceOverride: ""
nameOverride: "prom2teams"
fullnameOverride: ""
replicaCount: 1
image:
  repository: rancher/mirrored-idealista-prom2teams
  tag: 3.2.1
  pullPolicy: IfNotPresent
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 200Mi
service:
  type: ClusterIP
  port: 8089
prom2teams:
  host: 0.0.0.0
  port: 8089
  # the Microsoft Teams webhook URL written to config.ini as "Connector"
  connector: the-connector-url
  # extra named connectors ({name: url}); must not contain a key named
  # "Connector" when `connector` above is set (enforced in configmap.yaml)
  connectors: {}
  # group_alerts_by can be one of
  # ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url")
  # NOTE(review): left unset, this value flows into config.ini's "Field:"
  # entry — confirm prom2teams tolerates an empty/unset Field.
  group_alerts_by:
  # loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL)
  loglevel: INFO
  templatepath: /opt/prom2teams/helmconfig/teams.j2
  config: /opt/prom2teams/helmconfig/config.ini
# Security Context properties
securityContext:
  # enabled is a flag to enable Security Context
  enabled: true
  # runAsUser is the user ID used to run the container
  runAsUser: 65534
  # runAsGroup is the primary group ID used to run all processes within any container of the pod
  runAsGroup: 65534
  # fsGroup is the group ID associated with the container
  fsGroup: 65534
  # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the Hazelcast security context
  readOnlyRootFilesystem: true
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-sachet
apiVersion: v2
appVersion: 0.2.3
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
name: sachet
type: application
version: 1.0.1

View File

@ -0,0 +1 @@
# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl

View File

@ -0,0 +1,3 @@
rancher-sachet is now installed on the cluster!
Please refer to the upstream documentation for configuration options:
https://github.com/messagebird/sachet

View File

@ -0,0 +1,79 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "sachet.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end }}
{{/*
Expand the name of the chart.
*/}}
{{- define "sachet.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sachet.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "sachet.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{ include "sachet.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "sachet.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sachet.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{/* Create the ConfigMap only when one with this name does not already exist
     in the target namespace (typo fixed: "confimap" -> "configmap"). The
     pre-install/pre-upgrade hook together with this `lookup` guard and the
     `keep` resource policy preserves a user-modified ConfigMap across
     upgrades instead of overwriting it. NOTE(review): `lookup` returns an
     empty result during `helm template` and dry-runs, so the resource is
     always rendered in those modes. */}}
{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "sachet.namespace" . }}
  name: {{ include "sachet.fullname" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.yaml: |-
    {{- with .Values.sachet.providers }}
    providers: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.sachet.receivers }}
    receivers: {{ toYaml . | nindent 6 }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,75 @@
{{- /* Deployment running the Sachet webhook receiver plus a config-reloader
       sidecar that watches the mounted ConfigMap and POSTs to /-/reload on
       change. */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations: {{ toYaml . | nindent 8 }}
      {{- end }}
      labels: {{ include "sachet.selectorLabels" . | nindent 8 }}
    spec:
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "sachet.fullname" . }}
      {{- with .Values.podSecurityContext }}
      {{- /* BUG FIX: inside `with` the dot is already rebound to
             .Values.podSecurityContext; the previous `toYaml
             .Values.podSecurityContext` failed at render time with a
             nil-pointer error whenever the value was set */}}
      securityContext: {{ toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 9876
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /-/live
              port: http
          readinessProbe:
            httpGet:
              path: /-/ready
              port: http
          volumeMounts:
            - mountPath: /etc/sachet/
              name: config-volume
          {{- with .Values.resources }}
          {{- /* BUG FIX: use the `with` scope (.) — `toYaml .Values.resources`
                 here failed at render time whenever resources were set */}}
          resources: {{ toYaml . | nindent 12 }}
          {{- end }}
        - name: config-reloader
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }}
          imagePullPolicy: {{ .Values.configReloader.pullPolicy }}
          args:
            - -volume-dir=/watch-config
            - -webhook-method=POST
            - -webhook-status-code=200
            - -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
          volumeMounts:
            - mountPath: /watch-config
              name: config-volume
      volumes:
        - name: config-volume
          configMap:
            name: {{ include "sachet.fullname" . }}
            defaultMode: 0777

View File

@ -0,0 +1,28 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "sachet.fullname" . }}-psp
labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'configMap'

View File

@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "sachet.fullname" . }}-psp
namespace: {{ include "sachet.namespace" . }}
labels: {{ include "sachet.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resourceNames:
- {{ include "sachet.fullname" . }}-psp
resources:
- podsecuritypolicies
verbs:
- use

View File

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "sachet.fullname" . }}-psp
namespace: {{ include "sachet.namespace" . }}
labels: {{ include "sachet.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "sachet.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ include "sachet.fullname" . }}

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "sachet.fullname" . }}
namespace: {{ include "sachet.namespace" . }}
labels: {{ include "sachet.labels" . | nindent 4 }}

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "sachet.fullname" . }}
namespace: {{ include "sachet.namespace" . }}
labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if contains "NodePort" .Values.service.type }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector: {{ include "sachet.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,63 @@
# Default values for sachet.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    # the registry prefixed to all image references (see "system_default_registry")
    systemDefaultRegistry: ""
  # install into this namespace instead of the release namespace when set
  namespaceOverride: ""
nameOverride: "sachet"
fullnameOverride: ""
configReloader:
  repository: rancher/mirrored-jimmidyson-configmap-reload
  pullPolicy: IfNotPresent
  tag: v0.4.0
sachet:
  # reference: https://github.com/messagebird/sachet/blob/master/examples/config.yaml
  providers: {}
  receivers: []
replicaCount: 1
image:
  repository: rancher/mirrored-messagebird-sachet
  pullPolicy: IfNotPresent
  tag: 0.2.3
imagePullSecrets: []
podAnnotations: {}
# explicit empty map instead of a bare key: `podSecurityContext:` alone parses
# as null, which yamllint flags and which reads as "value missing" rather than
# "deliberately empty" (both forms are skipped by the template's `with` guard)
podSecurityContext: {}
securityContext:
  runAsUser: 1000
  runAsNonRoot: true
  runAsGroup: 1000
service:
  type: ClusterIP
  port: 9876
  # only used when service.type is NodePort
  nodePort: 30001
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -0,0 +1,14 @@
categories:
- monitoring
namespace: cattle-monitoring-system
questions:
- variable: prom2teams.enabled
default: false
label: Enable Microsoft Teams
type: boolean
group: "General"
- variable: sachet.enabled
default: false
label: Enable SMS
type: boolean
group: "General"

View File

@ -0,0 +1,2 @@
rancher-alerting-drivers is now installed on the cluster!
Please refer to the upstream documentation for each Driver for configuration options.

View File

@ -0,0 +1,91 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "drivers.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "drivers.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "drivers.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "drivers.labels" -}}
helm.sh/chart: {{ include "drivers.chart" . }}
{{ include "drivers.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "drivers.selectorLabels" -}}
app.kubernetes.io/name: {{ include "drivers.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "drivers.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "drivers.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
https://github.com/helm/helm/issues/4535#issuecomment-477778391
Usage: {{ include "call-nested" (list . "SUBCHART_NAME" "TEMPLATE") }}
e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }}
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}
{{/*
Get the list of configMaps to be managed
*/}}
{{- define "drivers.configmapList" -}}
{{- if .Values.sachet.enabled -}}
- {{ include "call-nested" (list . "sachet" "sachet.fullname") }}
{{- end }}
{{- if .Values.prom2teams.enabled -}}
- {{ include "call-nested" (list . "prom2teams" "prom2teams.fullname") }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,50 @@
{{- if and (not .Values.sachet.enabled) (not .Values.prom2teams.enabled) -}}
{{- fail "At least one Driver must be enabled to install the chart. " }}
{{- end -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "drivers.fullname" . }}-admin
labels: {{ include "drivers.labels" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- ""
resources:
- configmaps
resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "drivers.fullname" . }}-edit
labels: {{ include "drivers.labels" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- ""
resources:
- configmaps
resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "drivers.fullname" . }}-view
labels: {{ include "drivers.labels" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
- ""
resources:
- configmaps
resourceNames: {{ include "drivers.configmapList" . | nindent 6 }}
verbs:
- 'get'
- 'list'
- 'watch'

View File

@ -0,0 +1,17 @@
# Default values for rancher-alerting-driver.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
# the registry where all images will be pulled from
systemDefaultRegistry: ""
# set this value if you want the sub-charts to be installed into
# a namespace rather than where this chart is installed
namespaceOverride: ""
prom2teams:
enabled: false
sachet:
enabled: false

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-prom2teams
apiVersion: v1
appVersion: 3.2.1
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
name: rancher-prom2teams
version: 0.2.000-rc03

View File

@ -0,0 +1,44 @@
{%- set
theme_colors = {
'resolved' : '2DC72D',
'critical' : '8C1A1A',
'severe' : '8C1A1A',
'warning' : 'FF9A0B',
'unknown' : 'CCCCCC'
}
-%}
{
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}",
"summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}",
"title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}",
"sections": [{
"activityTitle": "{{ msg_text.summary }}",
"facts": [{% if msg_text.name %}{
"name": "Alert",
"value": "{{ msg_text.name }}"
},{% endif %}{% if msg_text.instance %}{
"name": "In host",
"value": "{{ msg_text.instance }}"
},{% endif %}{% if msg_text.severity %}{
"name": "Severity",
"value": "{{ msg_text.severity }}"
},{% endif %}{% if msg_text.description %}{
"name": "Description",
"value": "{{ msg_text.description }}"
},{% endif %}{
"name": "Status",
"value": "{{ msg_text.status }}"
}{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{
"name": "{{ key }}",
"value": "{{ msg_text.extra_labels[key] }}"
}{% endfor %}{% endif %}
{% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{
"name": "{{ key }}",
"value": "{{ msg_text.extra_annotations[key] }}"
}{% endfor %}{% endif %}],
"markdown": true
}]
}

View File

@ -0,0 +1,2 @@
Prom2Teams has been installed. Check its status by running:
kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}"

View File

@ -0,0 +1,73 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prom2teams.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prom2teams.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prom2teams.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "prom2teams.labels" -}}
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
app.kubernetes.io/instance: {{ .Release.Name }}
release: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,39 @@
{{/* Validate values before rendering anything. */}}
{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}}
{{- if not (has .Values.prom2teams.loglevel $valid) -}}
{{- fail "Invalid log level"}}
{{- end -}}
{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}}
{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector when prom2teams.connector is set"}}
{{- end -}}
{{/*
Render the ConfigMap only when it does not already exist in the cluster (the
`lookup` guard below). It is applied as a pre-install/pre-upgrade hook, so on
upgrade an existing ConfigMap -- possibly edited by the user -- is left
untouched, and "helm.sh/resource-policy": keep stops Helm from deleting it on
uninstall. NOTE(review): `lookup` returns an empty result during
`helm template` / dry-run, so in those modes the ConfigMap is always rendered.
*/}}
{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "prom2teams.namespace" . }}
  name: {{ include "prom2teams.fullname" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.ini: |-
    [HTTP Server]
    Host: {{ .Values.prom2teams.host }}
    Port: {{ .Values.prom2teams.port }}
    [Microsoft Teams]
    {{- with .Values.prom2teams.connector }}
    Connector: {{ . }}
    {{- end }}
    {{- range $key, $val := .Values.prom2teams.connectors }}
    {{ $key }}: {{ $val }}
    {{- end }}
    [Group Alerts]
    Field: {{ .Values.prom2teams.group_alerts_by }}
    [Log]
    Level: {{ .Values.prom2teams.loglevel }}
    [Template]
    Path: {{ .Values.prom2teams.templatepath }}
  teams.j2: {{ .Files.Get "files/teams.j2" | quote }}
{{- end -}}

View File

@ -0,0 +1,77 @@
# Deployment for the prom2teams driver. Configuration comes from the ConfigMap
# rendered in configmap.yaml, mounted at /opt/prom2teams/helmconfig/ and
# selected through the APP_CONFIG_FILE environment variable.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "prom2teams.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "prom2teams.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      serviceAccountName: {{ include "prom2teams.fullname" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: config
          configMap:
            name: {{ include "prom2teams.fullname" . }}
      containers:
        - name: {{ .Chart.Name }}
          image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8089
              protocol: TCP
          volumeMounts:
            - name: config
              mountPath: /opt/prom2teams/helmconfig/
          env:
            - name: APP_CONFIG_FILE
              value: {{ .Values.prom2teams.config | quote }}
            - name: PROM2TEAMS_PORT
              value: {{ .Values.prom2teams.port | quote }}
            # NOTE(review): this reads prom2teams.ip, but values.yaml and the
            # rendered config.ini use prom2teams.host -- confirm which key is
            # authoritative (as shipped this renders an empty string).
            - name: PROM2TEAMS_HOST
              value: {{ .Values.prom2teams.ip | quote }}
            - name: PROM2TEAMS_CONNECTOR
              value: {{ .Values.prom2teams.connector | quote }}
            - name: PROM2TEAMS_GROUP_ALERTS_BY
              value: {{ .Values.prom2teams.group_alerts_by | quote }}
          resources: {{ toYaml .Values.resources | nindent 12 }}
          {{- if .Values.securityContext.enabled }}
          securityContext:
            privileged: false
            # NOTE(review): values.yaml exposes
            # securityContext.readOnlyRootFilesystem, but this template
            # hardcodes false -- confirm whether it should be wired through.
            readOnlyRootFilesystem: false
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
          {{- end }}
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      # Pod-level security context: run as non-root whenever runAsUser != 0.
      securityContext:
        runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
        runAsGroup: {{ .Values.securityContext.runAsGroup }}
        fsGroup: {{ .Values.securityContext.fsGroup }}
      {{- end }}

View File

@ -0,0 +1,28 @@
# PodSecurityPolicy for the prom2teams pod: non-privileged, no host
# namespaces, non-root UID with bounded GID ranges, ConfigMap volumes only.
# NOTE(review): policy/v1beta1 PodSecurityPolicy is deprecated and removed in
# Kubernetes 1.25 -- confirm the chart's supported cluster version range.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  volumes:
    - 'configMap'

View File

@ -0,0 +1,15 @@
# Role granting `use` of the chart's PodSecurityPolicy; bound to the chart's
# ServiceAccount by the RoleBinding in rolebinding.yaml.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - policy
    resourceNames:
      - {{ include "prom2teams.fullname" . }}-psp
    resources:
      - podsecuritypolicies
    verbs:
      - use

View File

@ -0,0 +1,13 @@
# Binds the PSP Role to the chart's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "prom2teams.fullname" . }}-psp
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "prom2teams.fullname" . }}-psp
subjects:
  # NOTE(review): the ServiceAccount subject omits `namespace`; verify it
  # resolves to the release namespace on all supported Kubernetes versions.
  - kind: ServiceAccount
    name: {{ include "prom2teams.fullname" . }}

View File

@ -0,0 +1,6 @@
# ServiceAccount used by the prom2teams Deployment; target of the PSP
# RoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  labels: {{ include "prom2teams.labels" . | nindent 4 }}

View File

@ -0,0 +1,17 @@
# Service exposing the prom2teams HTTP endpoint (container port 8089).
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prom2teams.fullname" . }}
  namespace: {{ include "prom2teams.namespace" . }}
  # Rendered with nindent, consistent with every other template in this chart
  # (output is identical to the previous `labels:` + `indent 4` form).
  labels: {{ include "prom2teams.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: 8089
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "prom2teams.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,62 @@
# Default values for prom2teams.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    systemDefaultRegistry: ""
  namespaceOverride: ""
nameOverride: "prom2teams"
fullnameOverride: ""
replicaCount: 1
image:
  repository: rancher/mirrored-idealista-prom2teams
  tag: 3.2.1
  pullPolicy: IfNotPresent
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 200Mi
service:
  type: ClusterIP
  port: 8089
prom2teams:
  host: 0.0.0.0
  port: 8089
  connector: the-connector-url
  connectors: {}
  # group_alerts_by can be one of
  # ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url")
  # NOTE(review): left as a bare key (null) by default; verify how an empty
  # value renders in the generated config.ini before relying on it.
  group_alerts_by:
  # loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL)
  loglevel: INFO
  templatepath: /opt/prom2teams/helmconfig/teams.j2
  config: /opt/prom2teams/helmconfig/config.ini
# Security Context properties
securityContext:
  # enabled is a flag to enable Security Context
  enabled: true
  # runAsUser is the user ID used to run the container
  runAsUser: 65534
  # runAsGroup is the primary group ID used to run all processes within any container of the pod
  runAsGroup: 65534
  # fsGroup is the group ID associated with the container
  fsGroup: 65534
  # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the container security context
  # NOTE(review): the deployment template currently hardcodes
  # readOnlyRootFilesystem: false and does not read this value -- confirm.
  readOnlyRootFilesystem: true
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-sachet
apiVersion: v2
appVersion: 0.2.3
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
name: rancher-sachet
type: application
version: 1.0.100-rc03

View File

@ -0,0 +1 @@
# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl

View File

@ -0,0 +1,3 @@
rancher-sachet is now installed on the cluster!
Please refer to the upstream documentation for configuration options:
https://github.com/messagebird/sachet

View File

@ -0,0 +1,79 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "sachet.namespace" -}}
{{ default .Release.Namespace .Values.global.namespaceOverride }}
{{- end }}
{{/*
Expand the name of the chart.
*/}}
{{- define "sachet.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sachet.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "sachet.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{ include "sachet.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "sachet.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sachet.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{/*
Render the ConfigMap only when it does not already exist in the cluster (the
`lookup` guard below). It is applied as a pre-install/pre-upgrade hook, so on
upgrade an existing ConfigMap -- possibly edited by the user -- is left
untouched, and "helm.sh/resource-policy": keep stops Helm from deleting it on
uninstall.
*/}}
{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ include "sachet.namespace" . }}
  name: {{ include "sachet.fullname" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "3"
    "helm.sh/resource-policy": keep
data:
  config.yaml: |-
    {{- with .Values.sachet.providers }}
    providers: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.sachet.receivers }}
    receivers: {{ toYaml . | nindent 6 }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,75 @@
# Deployment for the Sachet SMS driver plus a config-reloader sidecar that
# POSTs to Sachet's /-/reload endpoint whenever the mounted ConfigMap changes.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations: {{ toYaml . | nindent 8 }}
      {{- end }}
      labels: {{ include "sachet.selectorLabels" . | nindent 8 }}
    spec:
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      {{- if .Values.nodeSelector }}
      {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      {{- if .Values.tolerations }}
      {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "sachet.fullname" . }}
      {{- with .Values.podSecurityContext }}
      # Inside `with` the dot is rebound to podSecurityContext itself;
      # referencing .Values here (as before) resolved to null and rendered
      # `securityContext: null` whenever the value was set.
      securityContext: {{ toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 9876
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /-/live
              port: http
          readinessProbe:
            httpGet:
              path: /-/ready
              port: http
          volumeMounts:
            - mountPath: /etc/sachet/
              name: config-volume
          {{- with .Values.resources }}
          # Same scope rule as above: use the rebound dot, not .Values.resources.
          resources: {{ toYaml . | nindent 12 }}
          {{- end }}
        - name: config-reloader
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }}
          imagePullPolicy: {{ .Values.configReloader.pullPolicy }}
          args:
            - -volume-dir=/watch-config
            - -webhook-method=POST
            - -webhook-status-code=200
            - -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
          volumeMounts:
            - mountPath: /watch-config
              name: config-volume
      volumes:
        - name: config-volume
          configMap:
            name: {{ include "sachet.fullname" . }}
            # NOTE(review): 0777 (world-writable) looks broader than necessary
            # for a read-only config mount -- confirm whether 0644 suffices.
            defaultMode: 0777

View File

@ -0,0 +1,28 @@
# PodSecurityPolicy for the Sachet pod: non-privileged, no host namespaces,
# non-root UID with bounded GID ranges, ConfigMap volumes only.
# NOTE(review): policy/v1beta1 PodSecurityPolicy is deprecated and removed in
# Kubernetes 1.25 -- confirm the chart's supported cluster version range.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  volumes:
    - 'configMap'

View File

@ -0,0 +1,15 @@
# Role granting `use` of the chart's PodSecurityPolicy; bound to the chart's
# ServiceAccount by the RoleBinding in rolebinding.yaml.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - policy
    resourceNames:
      - {{ include "sachet.fullname" . }}-psp
    resources:
      - podsecuritypolicies
    verbs:
      - use

View File

@ -0,0 +1,13 @@
# Binds the PSP Role to the chart's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "sachet.fullname" . }}-psp
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "sachet.fullname" . }}-psp
subjects:
  # NOTE(review): the ServiceAccount subject omits `namespace`; verify it
  # resolves to the release namespace on all supported Kubernetes versions.
  - kind: ServiceAccount
    name: {{ include "sachet.fullname" . }}

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "sachet.fullname" . }}
namespace: {{ include "sachet.namespace" . }}
labels: {{ include "sachet.labels" . | nindent 4 }}

View File

@ -0,0 +1,17 @@
# Service exposing Sachet's HTTP endpoint (container port 9876, named "http").
apiVersion: v1
kind: Service
metadata:
  name: {{ include "sachet.fullname" . }}
  namespace: {{ include "sachet.namespace" . }}
  labels: {{ include "sachet.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
      {{- if contains "NodePort" .Values.service.type }}
      # nodePort is only pinned when the service type is NodePort.
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
  selector: {{ include "sachet.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,63 @@
# Default values for sachet.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    systemDefaultRegistry: ""
  namespaceOverride: ""
nameOverride: "sachet"
fullnameOverride: ""
configReloader:
  repository: rancher/mirrored-jimmidyson-configmap-reload
  pullPolicy: IfNotPresent
  tag: v0.4.0
sachet:
  # reference: https://github.com/messagebird/sachet/blob/master/examples/config.yaml
  providers: {}
  receivers: []
replicaCount: 1
image:
  repository: rancher/mirrored-messagebird-sachet
  pullPolicy: IfNotPresent
  tag: 0.2.3
imagePullSecrets: []
podAnnotations: {}
# Explicit empty map (a bare key is read by YAML as null); both are falsy for
# the deployment's `with` guard, but {} makes "no pod-level security context
# by default" unambiguous.
podSecurityContext: {}
securityContext:
  runAsUser: 1000
  runAsNonRoot: true
  runAsGroup: 1000
service:
  type: ClusterIP
  port: 9876
  # nodePort is only applied when service.type is NodePort
  nodePort: 30001
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -1836,6 +1836,32 @@ entries:
- released/assets/longhorn/longhorn-crd-1.0.200.tgz
version: 1.0.200
rancher-alerting-drivers:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Alerting Drivers
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-alerting-drivers
apiVersion: v2
appVersion: 1.16.0
created: "2021-04-21T19:30:59.638973174Z"
dependencies:
- condition: prom2teams.enabled
name: prom2teams
repository: file://./charts/prom2teams
- condition: sachet.enabled
name: sachet
repository: file://./charts/sachet
description: The manager for third-party webhook receivers used in Prometheus
Alertmanager
digest: 4e6ae29c5ace7253df6d3ab03f365586dee5886983840252760dd1f4f745c36b
keywords:
- monitoring
- alertmanger
- webhook
name: rancher-alerting-drivers
urls:
- assets/rancher-alerting-drivers/rancher-alerting-drivers-1.0.100-rc03.tgz
version: 1.0.100-rc03
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Alerting Drivers
@ -8541,6 +8567,20 @@ entries:
- released/assets/rancher-operator-crd/rancher-operator-crd-0.1.0-alpha800.tgz
version: 0.1.0-alpha800
rancher-prom2teams:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-prom2teams
apiVersion: v1
appVersion: 3.2.1
created: "2021-04-21T19:31:00.376394323Z"
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
digest: 95e6b362dc67dc83e0359845ccad06a0d38cfbecf45203e6a4d361eea79f1d8e
name: rancher-prom2teams
urls:
- assets/rancher-prom2teams/rancher-prom2teams-0.2.000-rc03.tgz
version: 0.2.000-rc03
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
@ -8748,6 +8788,21 @@ entries:
- released/assets/rancher-pushprox/rancher-pushprox-0.1.0.tgz
version: 0.1.0
rancher-sachet:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/os: linux
catalog.cattle.io/release-name: rancher-sachet
apiVersion: v2
appVersion: 0.2.3
created: "2021-04-21T19:31:00.381182575Z"
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
digest: 9bf4bcf83bf17d0972fbca2c45284b1e010358c841d86bc04300c73bc5fe87eb
name: rancher-sachet
type: application
urls:
- assets/rancher-sachet/rancher-sachet-1.0.100-rc03.tgz
version: 1.0.100-rc03
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"