Updating assets, charts, and index.yaml

pull/445/head
Samuel Attwood 2022-06-16 15:08:11 -04:00
parent 8b741f4cab
commit 732fd26820
265 changed files with 39080 additions and 0 deletions

Binary file not shown.

Binary file not shown.


@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@ -0,0 +1,19 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dkube
catalog.cattle.io/release-name: dkube
apiVersion: v2
appVersion: 3.2.0.1
description: A Kubernetes-based MLOps platform based on open standards Kubeflow and
MLflow
home: https://dkube.io
icon: https://www.dkube.io/img/logo_new.png
keywords:
- kubernetes
- MLOps
- Kubeflow
- AI
kubeVersion: "1.20"
name: dkube-deployer
type: application
version: 1.0.601


@ -0,0 +1,25 @@
# Dkube
[DKube](https://dkube.io/) is an MLOps product built on the best of Kubeflow and MLflow. It is optimized for deployment on-prem or in the cloud, giving you the flexibility and innovation of open-source reference architectures like Kubeflow and MLflow as a supported product.
With DKube you can prepare your data (including feature engineering), train AI models, optimize, tune, and publish them, and then deploy and serve those models. Kubeflow Pipelines, KFServing, and MLflow experiment tracking and comparison are all provided, along with model and data versioning for reproducibility, audits, and governance.
## Installation
### Requirements
The following is the minimum configuration required to deploy DKube on a Rancher cluster:
- The minimal configuration for each of the worker nodes is as follows:
- 16 cores
- 64 GB RAM
- 300 GB storage for Root Volume
- The worker nodes can be brought up with any of the following OS distributions:
- Ubuntu 20.04
- CentOS / RHEL 7.9
- Amazon Linux 2 for installations on AWS
- Storage
- The recommended storage option for DKube metadata and user ML resources is an external NFS server with a minimum of 1 TB of storage available.
- For evaluation purposes, one of the worker nodes can be configured as the storage option. In this case the recommended storage size on the worker node is 1 TB, with a minimum of 400 GB.
- DKube requires Kubernetes version 1.20.
- DKube image registry details are required for installation. Please send an email to support@dkube.io for the details.
For more information on installation, refer to the [Dkube Installation Guide](https://dkube.io/install/install3_x/Install-Advanced.html).
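For orientation, a non-HA evaluation install from the Helm CLI could look roughly like the sketch below. The chart name `dkube-deployer` and the value keys come from this chart; the repository URL, credentials, and storage node are placeholders you must replace (registry credentials come from support@dkube.io as noted above).

```bash
# Sketch only: add the repository hosting the dkube-deployer chart (placeholder URL),
# then install with the minimum required values from values.yaml / questions.yaml.
helm repo add dkube https://charts.example.com/dkube   # placeholder repository URL
helm install dkube dkube/dkube-deployer \
  --set EULA=yes \
  --set username=<operator-username> \
  --set password=<operator-password> \
  --set wipedata=yes \
  --set registry.username=<registry-username> \
  --set registry.password=<registry-password> \
  --set optional.storage.type=disk \
  --set optional.storage.node=<storage-node-hostname>
```

For an HA deployment, set `ha=true` as well; the cluster must then have at least three schedulable nodes.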


@ -0,0 +1,326 @@
questions:
- variable: EULA
description: "The Dkube EULA is available at www.oneconvergence.com/EULA/One-Convergence-EULA.pdf . By accepting this license agreement you acknowledge that you have read and understood the terms and conditions mentioned. Please refer to Basic Configuration section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#basic-configuration"
type: enum
label: DKUBE-EULA
required: true
group: "General"
options:
- "yes"
- variable: username
default: ""
description: "Dkube operator's local sigh-in username: Username cannot be same as that of a namespace's name. Also, following names are restricted - dkube, dkube-infra, kubeflow, istio-system, knative-serving, harbor-system. Please refer to Basic Configuration section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#basic-configuration"
type: string
label: Username
required: true
group: "General"
show_if: "EULA=yes"
- variable: password
default: ""
description: "Dkube operator's local sigh-in password"
type: password
label: Password
required: true
group: "General"
show_if: "EULA=yes"
- variable: version
default: "3.2.0.1"
description: "Version of dkube to be installed"
type: string
label: Dkube version
required: true
group: "General"
show_if: "EULA=yes"
- variable: provider
default: "dkube"
description: "Kubernetes provider: Choose one of dkube/gke/okd/eks/ntnx/tanzu"
type: enum
label: Kube Provider
required: true
options:
- "dkube"
- "gke"
- "okd"
- "eks"
- "ntnx"
- "tanzu"
group: "General"
show_if: "EULA=yes"
- variable: ha
default: "false"
description: "When HA=true k8s cluster must have min 3 schedulable nodes. Please refer to resilient operation section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#resilient-operation"
type: boolean
label: HA
required: true
group: "General"
show_if: "EULA=yes"
- variable: wipedata
default: "yes"
description: "Wipe dkube data during helm operation install/uninstall. Choose one of yes/no"
type: enum
label: Wipe Data
required: true
options:
- "yes"
- "no"
group: "General"
show_if: "EULA=yes"
- variable: minimal
default: "no"
description: "To install minimal version of dkube. Choose one of yes/no"
type: enum
label: Minimal
required: true
options:
- "yes"
- "no"
group: "General"
show_if: "EULA=yes"
- variable: airgap
default: "no"
description: "To install air-gapped version of dkube. Choose one of yes/no"
type: enum
label: Airgap
required: true
options:
- "yes"
- "no"
group: "General"
show_if: "EULA=yes"
# registry
- variable: registry.name
default: "docker.io/ocdr"
description: "Repository from where Dkube images can be picked. Format: registry/[repo]. Please contact support@dkube.io for Dkube registry details"
type: string
label: Dkube images registry
required: true
group: "Registry"
show_if: "EULA=yes"
- variable: registry.username
default: ""
description: "Container registry username"
type: string
label: Dkube images registry username
required: true
group: "Registry"
show_if: "EULA=yes"
- variable: registry.password
default: ""
description: "Container registry password"
type: password
label: Dkube images registry password
required: true
group: "Registry"
show_if: "EULA=yes"
# STORAGE
- variable: optional.storage.type
default: "disk"
description: "Type of storage. Note: ceph storage type can only be use with HA=true And pv or sc can only be used with HA=false. Please refer to Storage options section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#storage-options"
type: enum
label: Dkube storage type
options:
- "disk"
- "nfs"
- "ceph"
- "pv"
- "sc"
group: "Storage"
show_if: "EULA=yes"
subquestions:
- variable: optional.storage.path
default: "/var/dkube"
description: "Localpath on the storage node"
type: string
label: Dkube storage disk path
show_if: "optional.storage.type=disk"
- variable: optional.storage.node
default: ""
description: "Node name for dkube storage. Provide hostname of the master node if Kube provider is dkube"
type: string
label: Dkube storage disk node
show_if: "optional.storage.type=disk"
- variable: optional.storage.persistentVolume
default: ""
description: "Name of persistent volume to be used for storage"
type: string
label: Storage PV
show_if: "ha=false&&optional.storage.type=pv"
- variable: optional.storage.storageClass
default: ""
description: "Name of storage class to be used for storage. Make sure dynamic provisioner is running for the storage class name"
type: string
label: Storage class
show_if: "ha=false&&optional.storage.type=sc"
- variable: optional.storage.nfsServer
default: ""
description: "NFS server ip to be used for storage"
type: string
label: NFS Server
show_if: "optional.storage.type=nfs"
- variable: optional.storage.nfsPath
default: ""
description: "NFS path (Make sure the path exists)"
type: string
label: NFS path
show_if: "optional.storage.type=nfs"
- variable: optional.storage.cephMonitors
default: ""
description: "Comma separated IPs of ceph monitors"
type: string
label: Ceph monitors
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephSecret
default: ""
description: "Ceph secret"
type: string
label: Ceph Secret
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephFilesystem
default: ""
description: "Ceph Filesystem"
type: string
label: Ceph Filesystem
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephNamespace
default: ""
description: "Ceph Namespace"
type: string
label: Ceph Namespace
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephPath
default: "/var/lib/rook"
description: "Ceph data and configuration path for internal ceph. Internal ceph is installed when HA=true and Storage type is not equal to nfs or ceph"
type: string
label: Ceph storage path
#show_if: "ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
show_if: "ha=true"
- variable: optional.storage.cephDisk
default: ""
description: "Only for internal ceph from release 2.2.1.12. Disk name for internal ceph storage. It should be a raw formatted disk. E.g: sdb"
type: string
label: Ceph Storage Disk
#show_if: "ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
show_if: "ha=true"
# Loadbalancer
- variable: optional.loadbalancer.access
default: "nodeport"
description: "Type of dkube proxy service, possible values are nodeport and loadbalancer; Please use loadbalancer if kubeProvider is gke."
type: enum
label: Dkube access type
group: "Loadbalancer"
#show_if: "EULA=yes&&ha=true"
#show_if: "EULA=yes&&ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
#show_if: "ha=true&&optional.storage.type=ceph"
options:
- "loadbalancer"
- "nodeport"
show_subquestion_if: loadbalancer
show_if: "EULA=yes"
subquestions:
- variable: optional.loadbalancer.metallb
default: "false"
description: "Set true to install MetalLB Loadbalancer. Please refer to Load Balancer options section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#load-balancer-options"
type: string
label: MetalLB Loadbalancer
- variable: optional.loadbalancer.vipPool
default: ""
description: "Valid only if installLoadbalancer is true; Only CIDR notation is allowed. E.g: 192.168.2.0/24"
type: string
label: Loadbalancer VipPool
show_if: "EULA=yes"
# Modelmonitor
- variable: optional.modelmonitor.enabled
default: "false"
description: "To enable modelmonitor in dkube. (true / false). Please refer to Model Monitor section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#model-monitor"
type: boolean
label: Enable Modelmonitor
group: "General"
show_if: "EULA=yes"
# DBAAS
- variable: optional.DBAAS.database
default: ""
description: "To configure external database for dkube. Supported mysql, sqlserver(mssql). Empty will pickup default sql db installed with dkube. Please refer to section External Database of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#external-database"
type: string
label: database
group: "DBAAS"
show_if: "EULA=yes"
- variable: optional.DBAAS.dsn
default: ""
description: "Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html"
type: string
label: dsn
group: "DBAAS"
show_if: "EULA=yes"
# CICD
- variable: optional.CICD.enabled
default: "false"
description: "To enable tekton cicd with dkube. (true / false). Please refer to CICD section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#ci-cd"
type: boolean
label: CICD Enabled
group: "CICD"
show_if: "EULA=yes"
show_subquestion_if: true
subquestions:
- variable: optional.CICD.registryName
default: "docker.io/ocdr"
description: "Docker registry where CICD built images will be saved"
type: string
label: Docker registry name
- variable: optional.CICD.registryUsername
default: ""
description: "Docker registry Username"
type: string
label: Docker registry Username
- variable: optional.CICD.registryPassword
default: ""
description: "Docker registry password"
type: string
label: Docker registry Password
- variable: optional.CICD.IAMRole
default: ""
description: "For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com. registryName: 'aws_account_id.dkr.ecr.region.amazonaws.com' Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess; IAMRole: 'arn:aws:iam::<aws_account_id>:role/<iam-role>'"
type: string
label: IAMRole
# Node Affinity
- variable: optional.nodeAffinity.dkubeNodesLabel
default: ""
description: "Nodes identified by labels on which the dkube pods must be scheduled.. Say management nodes. Unfilled means no binding. When filled there needs to be minimum of 3nodes in case of HA and one node in case of non-HA. Example: DKUBE_NODES_LABEL: key1=value1. Please refer to section Node Affinity of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#node-affinity"
type: string
label: DKUBE_NODES_LABEL
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.dkubeNodesTaints
default: ""
description: "Nodes to be tolerated by dkube control plane pods so that only they can be scheduled on the nodes. Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: DKUBE_NODES_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.gpuWorkloadTaints
default: ""
description: "Taints of the nodes where gpu workloads must be scheduled. Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: GPU_WORKLOADS_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.productionWorkloadTaints
default: ""
description: "Taints of the nodes where production workloads must be scheduled. Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: PRODUCTION_WORKLOADS_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.dkubeDockerhubCredentialsSecret
default: ""
description: "Dockerhub Secrets for OCDR images. If you don't create, this will be auto-created with default values."
type: string
label: DKUBE DOCKERHUB CREDENTIALS SECRET
group: "General"
show_if: "EULA=yes"
- variable: optional.IAMRole
default: ""
description: "AWS IAM role. Valid only if KUBE_PROVIDER=eks. This will be set as an annotation in few deployments. Format should be like: IAMRole: '<key>: <iam role>' eg: IAMRole: 'iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole'"
type: string
label: IAMRole
group: "General"
show_if: "EULA=yes&&provider=eks"


@ -0,0 +1,7 @@
Installing Dkube {{ .Values.version }}
DKube Installation has started. Please use the commands below to view the installation progress. The commands are for installation only. Do not use them for upgrade.
kubectl wait --for=condition=ready --timeout=5m pod -l job-name=dkube-helm-installer
kubectl logs -l job-name=dkube-helm-installer --follow --tail=-1 && kubectl wait --for=condition=complete --timeout=30m job/dkube-helm-installer


@ -0,0 +1,53 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "dkube-deployer.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dkube-deployer.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "dkube-deployer.labels" -}}
helm.sh/chart: {{ include "dkube-deployer.chart" . }}
{{ include "dkube-deployer.selectorLabels" . }}
app.kubernetes.io/version: {{ .Values.version | quote }}
app.kubernetes.io/managed-by: "dkube.io"
{{- end }}
{{/*
Selector labels
*/}}
{{- define "dkube-deployer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dkube-deployer.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Image pull secret
*/}}
{{- define "dkube-deployer.imagePullSecretData" -}}
{{- with .Values.registry }}
{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"ocdlgit@oneconvergence.com\",\"auth\":\"%s\"}}}" .name .username .password (printf "%s:%s" .username .password | b64enc) | b64enc }}
{{- end }}
{{- end }}
{{/*
model catalog enable flag
*/}}
{{- define "dkube-deployer.modelCatalog" -}}
{{- if hasPrefix "2.1" .Values.version }}
{{- printf "false" }}
{{- else }}
{{- printf "true" }}
{{- end }}
{{- end }}


@ -0,0 +1,167 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dkube-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
data:
dkube.ini: |
#################################################################
# #
# DKUBE CONFIG FILE #
# #
#################################################################
[REQUIRED]
# Choose one of dkube/gke/okd/eks/ntnx/tanzu
KUBE_PROVIDER={{ .Values.provider }}
# When HA=true k8s cluster must have min 3 schedulable nodes
HA={{ .Values.ha }}
# Operator's Local Sign In Details
# Username cannot be the same as a namespace name.
# Also, the following names are restricted - dkube, monitoring, kubeflow
# '$' is not supported
USERNAME={{ .Values.username }}
PASSWORD={{ .Values.password }}
# To wipe dkube storage
# Accepted values: yes/no
WIPEDATA={{ .Values.wipedata }}
# To install minimal version of dkube
# Accepted values: yes/no
MINIMAL={{ .Values.minimal }}
# To install air-gapped version of dkube
# Accepted values: yes/no
AIRGAP={{ .Values.airgap }}
[NODE-AFFINITY]
# Labels identifying the nodes on which the dkube pods must be scheduled (for example, management nodes). Unfilled means no binding. When filled there needs to be a minimum of 3 nodes in case of HA and one node in case of non-HA
# Example: DKUBE_NODES_LABEL: key1=value1
DKUBE_NODES_LABEL: {{ .Values.optional.nodeAffinity.dkubeNodesLabel }}
# Taints of the nodes to be tolerated by dkube control plane pods so that only they can be scheduled on those nodes
# Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
DKUBE_NODES_TAINTS: {{ .Values.optional.nodeAffinity.dkubeNodesTaints }}
# Taints of the nodes where gpu workloads must be scheduled.
# Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
GPU_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.gpuWorkloadTaints }}
# Taints of the nodes where production workloads must be scheduled.
# Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
PRODUCTION_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.productionWorkloadTaints }}
[OPTIONAL]
# version of dkube installer to be used
DKUBE_INSTALLER_VERSION={{ .Values.version }}
# version of dkube to be installed
DKUBE_VERSION={{ .Values.version }}
# Dockerhub Secrets for OCDR images
# If you don't create one, it will be auto-created with default values.
DKUBE_DOCKERHUB_CREDENTIALS_SECRET={{ .Values.optional.dkubeDockerhubCredentialsSecret }}
# TLS Secret of Operator's Certificate & Private Key
# If you don't create, place your certificate and private key in $HOME/.dkube
DKUBE_OPERATOR_CERTIFICATE=
# Repository from where Dkube images can be picked.
# Format: registry/[repo]
DKUBE_REGISTRY={{ .Values.registry.name }}
# Container registry username
REGISTRY_UNAME={{ .Values.registry.username }}
# Container registry password
REGISTRY_PASSWD={{ .Values.registry.password }}
# AWS IAM role
# Valid only if KUBE_PROVIDER=eks
# This will be set as an annotation in a few deployments
# Format should be like:
# IAM_ROLE=<key>: <iam role>
# eg: IAM_ROLE=iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole
# Note: Don't enclose with quotes
IAM_ROLE={{ .Values.optional.IAMRole }}
[EXTERNAL]
# Type of dkube proxy service, possible values are nodeport and loadbalancer
ACCESS={{ .Values.optional.loadbalancer.access }}
# 'true' - to install MetalLB Loadbalancer
# Must fill LB_VIP_POOL if true
INSTALL_LOADBALANCER={{ .Values.optional.loadbalancer.metallb }}
# Only CIDR notation is allowed. E.g: 192.168.2.0/24
# Valid only if INSTALL_LOADBALANCER=true
LB_VIP_POOL={{ .Values.optional.loadbalancer.vipPool }}
[STORAGE]
# Type of storage
# Possible values: disk, pv, sc, nfs, ceph
# Following are required fields for corresponding storage type
# -------------------------------------------------------
# STORAGE_TYPE REQUIRED_FIELDS
# -------------------------------------------------------
# disk STORAGE_DISK_NODE and STORAGE_DISK_PATH
# pv STORAGE_PV
# sc STORAGE_SC
# nfs STORAGE_NFS_SERVER and STORAGE_NFS_PATH
# ceph STORAGE_CEPH_MONITORS and STORAGE_CEPH_SECRET
# For 2.2.1.12 and later
# ceph STORAGE_CEPH_FILESYSTEM and STORAGE_CEPH_NAMESPACE
STORAGE_TYPE={{ .Values.optional.storage.type }}
# Localpath on the storage node
STORAGE_DISK_PATH={{ .Values.optional.storage.path }}
# Nodename of the storage node
# Possible values: AUTO/<nodename>
# AUTO - Master node will be chosen for storage if KUBE_PROVIDER=dkube
STORAGE_DISK_NODE={{ .Values.optional.storage.node }}
# Name of persistent volume
STORAGE_PV={{ .Values.optional.storage.persistentVolume }}
# Name of the storage class
# Make sure dynamic provisioner is running for the storage class name
STORAGE_SC={{ .Values.optional.storage.storageClass }}
# NFS server ip
STORAGE_NFS_SERVER={{ .Values.optional.storage.nfsServer }}
# NFS path (Make sure the path exists)
STORAGE_NFS_PATH={{ .Values.optional.storage.nfsPath }}
# Comma separated IPs of ceph monitors
STORAGE_CEPH_MONITORS={{ .Values.optional.storage.cephMonitors }}
# Ceph secret
STORAGE_CEPH_SECRET={{ .Values.optional.storage.cephSecret }}
# Name of the ceph filesystem
# E.g: dkubefs
STORAGE_CEPH_FILESYSTEM={{ .Values.optional.storage.cephFilesystem }}
# Name of the namespace where ceph is installed
# E.g: rook-ceph
STORAGE_CEPH_NAMESPACE={{ .Values.optional.storage.cephNamespace }}
# Internal Ceph
# Internal ceph is installed when HA=true and STORAGE_TYPE is not in ("nfs", "ceph")
# Both the following fields are compulsory
# Configuration path for internal ceph
STORAGE_CEPH_PATH={{ .Values.optional.storage.cephPath }}
# Disk name for internal ceph storage
# It should be a raw formatted disk
# E.g: sdb
STORAGE_CEPH_DISK={{ .Values.optional.storage.cephDisk }}
[MODELMONITOR]
#To enable modelmonitor in dkube. (true / false)
ENABLED={{ .Values.optional.modelmonitor.enabled }}
[CICD]
#To enable tekton cicd with dkube. (true / false)
ENABLED={{ .Values.optional.CICD.enabled }}
#Docker registry where CICD built images will be saved.
#For DockerHub, enter docker.io/<username>
DOCKER_REGISTRY={{ .Values.optional.CICD.registryName }}
REGISTRY_USERNAME={{ .Values.optional.CICD.registryUsername }}
REGISTRY_PASSWORD={{ .Values.optional.CICD.registryPassword }}
#For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com.
#DOCKER_REGISTRY=aws_account_id.dkr.ecr.region.amazonaws.com
#Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM
#based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess
IAM_ROLE={{ .Values.optional.CICD.IAMRole }}
[MODEL-CATALOG]
#To enable model catalog with dkube. (true / false)
ENABLED={{ template "dkube-deployer.modelCatalog" . }}
#To configure external database for dkube
[DBAAS]
#Supported mysql, sqlserver(mssql)
#Empty will pick up the default sql db installed with dkube.
DATABASE={{ .Values.optional.DBAAS.database }}
#Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html
DSN={{ .Values.optional.DBAAS.dsn }}


@ -0,0 +1,47 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-uninstaller-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-uninstaller-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
hostPID: true
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-uninstaller-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
-
mountPath: /root/.dkube/dkube.ini
name: dkube-config
subPath: dkube.ini
{{- if eq .Values.wipedata "yes" }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "uninstall", "--wipe-data"]
{{- else }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "uninstall"]
{{- end }}
serviceAccountName: dkube-deployer-sa
volumes:
-
configMap:
name: dkube-config
name: dkube-config


@ -0,0 +1,67 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-upgrade-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "-1"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-upgrade-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-upgrade-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "upgrade", {{ .Values.version | quote}}]
serviceAccountName: dkube-deployer-sa
---
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-installer-job-cleanup-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-upgrade,post-upgrade
"helm.sh/hook-weight": "-2"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-installer-job-cleanup-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-installer-job-cleanup-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
command: ["/bin/sh", "-c"]
args:
- kubectl delete job dkube-helm-installer --ignore-not-found=true
serviceAccountName: dkube-deployer-sa


@ -0,0 +1,41 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-helm-installer"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-helm-installer"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
hostPID: true
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-helm-installer
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
-
mountPath: /root/.dkube/dkube.ini
name: dkube-config
subPath: dkube.ini
{{- if eq .Values.wipedata "yes" }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "install", "--accept-eula=yes", "--wipe-data"]
{{- else }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "install", "--accept-eula={{ .Values.EULA }}"]
{{- end }}
serviceAccountName: dkube-deployer-sa
volumes:
-
configMap:
name: dkube-config
name: dkube-config


@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: dkube-dockerhub-secret
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ template "dkube-deployer.imagePullSecretData" . }}


@ -0,0 +1,136 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: dkube-deployer-binding
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: dkube-deployer-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dkube-deployer-sa
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: dkube-deployer-clusterrole
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/portforward
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
- update
- patch
- create
- delete
- apiGroups:
- kubeflow.org
resources:
- tfjobs
verbs:
- '*'
- apiGroups:
- kubeflow.org
resources:
- mpijobs
verbs:
- '*'
- apiGroups:
- '*'
resources:
- replicasets
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- ""
resources:
- events
verbs:
- list
- apiGroups:
- ""
resources:
- persistentvolumes
- persistentvolumeclaims
- services
- endpoints
- configmaps
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- nodes
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
- clusterroles
- clusterrolebindings
verbs:
- '*'
- apiGroups:
- ""
resources:
- serviceaccounts
- secrets
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- '*'


@ -0,0 +1,205 @@
{
"$schema":"http://json-schema.org/draft-07/schema",
"type":"object",
"title":"The root schema",
"description":"The root schema comprises the entire JSON document.",
"required":[
"EULA",
"username",
"password",
"version",
"provider",
"ha",
"wipedata",
"registry",
"optional"
],
"properties":{
"provider":{
"$id":"#/properties/provider",
"enum": ["dkube", "gke", "okd", "eks", "ntnx", "tanzu"]
},
"username":{
"$id":"#/properties/username",
"type":"string",
"minLength":1
},
"password":{
"$id":"#/properties/password",
"type":"string",
"minLength":1
},
"EULA":{
"$id":"#/properties/EULA",
"type":"string",
"enum": ["yes"]
},
"ha":{
"$id":"#/properties/ha",
"type":"boolean"
},
"wipedata":{
"$id":"#/properties/wipedata",
"type":"string",
"enum": ["yes", "no"]
},
"registry":{
"$id":"#/properties/registry",
"type":"object",
"required": [
"name",
"username",
"password"
],
"properties":{
"name":{
"$id":"#/properties/registry/properties/name",
"type":"string",
"minLength":1
},
"username":{
"$id":"#/properties/registry/properties/username",
"type":"string",
"minLength":1
},
"password":{
"$id":"#/properties/registry/properties/password",
"type":"string",
"minLength":1
}
}
},
"optional":{
"$id":"#/properties/optional",
"type":"object",
"required": [
"storage"
],
"properties":{
"storage":{
"$id":"#/properties/optional/properties/storage",
"type":"object",
"properties": {
"type": {
"enum": ["disk", "pv", "sc", "nfs", "ceph"]
}
},
"allOf":[
{
"if": {
"properties": {"type": {"const": "disk"}}
},
"then": {
"$ref": "#/properties/optional/definitions/disk"
}
},
{
"if": {
"properties": {"type": {"const": "pv"}}
},
"then": {
"$ref": "#/properties/optional/definitions/pv"
}
},
{
"if": {
"properties": {"type": {"const": "sc"}}
},
"then": {
"$ref": "#/properties/optional/definitions/sc"
}
},
{
"if": {
"properties": {"type": {"const": "nfs"}}
},
"then": {
"$ref": "#/properties/optional/definitions/nfs"
}
},
{
"if": {
"properties": {"type": {"const": "ceph"}}
},
"then": {
"$ref": "#/properties/optional/definitions/ceph"
}
}
]
}
},
"definitions":{
"disk":{
"properties":{
"path":{
"type":"string",
"pattern":"^(/[^/ ]*)+/?$"
},
"node":{
"type":"string",
"minLength": 1
}
},
"required":[
"path",
"node"
]
},
"pv":{
"properties":{
"persistentVolume":{
"type":"string",
"minLength": 1
}
},
"required":[
"persistentVolume"
]
},
"sc":{
"properties":{
"storageClass":{
"type":"string",
"minLength": 1
}
},
"required":[
"storageClass"
]
},
"nfs":{
"properties":{
"nfsPath":{
"type":"string",
"pattern":"^(/[^/ ]*)+/?$"
},
"nfsServer":{
"type":"string",
"minLength": 1
}
},
"required":[
"nfsPath",
"nfsServer"
]
},
"ceph":{
"properties":{
"cephMonitors":{
"type":"string"
},
"cephSecret":{
"type":"string"
},
"cephFilesystem":{
"type":"string"
},
"cephNamespace":{
"type":"string"
}
}
}
}
}
}
}


@ -0,0 +1,182 @@
# The DKube EULA is available at: www.oneconvergence.com/EULA/One-Convergence-EULA.pdf
# By accepting this license agreement you acknowledge that you agree to the terms and conditions.
# The installation will only proceed if the EULA is accepted by defining the EULA value as "yes".
EULA: ""
# Operator's Local Sign In Details.
# Username cannot be the same as a Kubernetes namespace name.
# Names like dkube, monitoring, kubeflow are restricted.
username: ""
password: ""
# dkube version
version: "3.2.0.1"
# Choose one of dkube/gke/okd/eks/ntnx/tanzu kube provider
provider: "dkube"
# For ha deployment, k8s cluster must have min 3 schedulable nodes
ha: false
# Wipe dkube data during helm operation install/uninstall.
# Choose one of yes/no
wipedata: ""
# To install minimal version of dkube
# Accepted values: yes/no
minimal: "no"
# To install air-gapped version of dkube
# Accepted values: yes/no
airgap: "no"
# Docker registry for DKube installation
registry:
# Format: registry/[repo]
name: "docker.io/ocdr"
# Container registry username
username: ""
# Container registry password
password: ""
optional:
storage:
# Type of storage
# Possible values: disk, pv, sc, nfs, ceph
# Following are required fields for corresponding storage type
# -------------------------------------------------------
# STORAGE_TYPE REQUIRED_FIELDS
# -------------------------------------------------------
# disk node and path
# pv persistentVolume
# sc storageClass
# nfs nfsServer and nfsPath
# ceph cephMonitors and cephSecret
# For release 2.2.1.12 and later
# ceph cephFilesystem and cephNamespace
type: "disk"
# Localpath on the storage node
path: "/var/dkube"
# Nodename of the storage node
# Possible values: AUTO/<nodename>
# AUTO - Master node will be chosen for storage if KUBE_PROVIDER=dkube
node: ""
# Name of persistent volume
persistentVolume: ""
# Name of the storage class
# Make sure dynamic provisioner is running for the storage class name
storageClass: ""
# NFS server ip
nfsServer: ""
# NFS path (Make sure the path exists)
nfsPath: ""
# Only for external ceph before release 2.2.1.12
# Comma separated IPs of ceph monitors
cephMonitors: ""
# Only for external ceph before release 2.2.1.12
# Ceph secret
cephSecret: ""
# Only for external ceph from release 2.2.1.12
# Name of the ceph filesystem
# E.g: dkubefs
cephFilesystem: ""
# Only for external ceph from release 2.2.1.12
# Name of the namespace where ceph is installed
# E.g: rook-ceph
cephNamespace: ""
# Internal Ceph
# Internal ceph is installed when HA=true and STORAGE_TYPE is not in ("nfs", "ceph")
# Configuration path for internal ceph
cephPath: "/var/lib/rook"
# Only for internal ceph from release 2.2.1.12
# Disk name for internal ceph storage
# It should be a raw formatted disk
# E.g: sdb
cephDisk: ""
loadbalancer:
# Type of dkube proxy service, possible values are nodeport and loadbalancer
# Please use loadbalancer if kubeProvider is gke.
access: "nodeport"
# 'true' - to install MetalLB Loadbalancer
# Must fill LB_VIP_POOL if true
metallb: "false"
# Only CIDR notation is allowed. E.g: 192.168.2.0/24
# Valid only if installLoadbalancer is true
vipPool: ""
modelmonitor:
#To enable modelmonitor in dkube. (true / false)
enabled: false
DBAAS:
# To configure external database for dkube
# Supported mysql, sqlserver(mssql)
# Empty will pick up the default sql db installed with dkube
database: ""
# Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html
dsn: ""
CICD:
#To enable tekton cicd with dkube. (true / false)
enabled: false
#Docker registry where CICD built images will be saved.
registryName: "docker.io/ocdr"
registryUsername: ""
registryPassword: ""
#For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com.
#registryName: "aws_account_id.dkr.ecr.region.amazonaws.com"
#Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM
#based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess
#IAMRole: "arn:aws:iam::<aws_account_id>:role/<iam-role>"
IAMRole: ""
nodeAffinity:
# Labels identifying the nodes on which the dkube pods must be scheduled (for example, management nodes). Unfilled means no binding. When filled there needs to be a minimum of 3 nodes in case of HA and one node in case of non-HA
# Example: DKUBE_NODES_LABEL: key1=value1
dkubeNodesLabel: ""
# Taints of the nodes to be tolerated by dkube control plane pods so that only they can be scheduled on those nodes
# Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
dkubeNodesTaints: ""
# Taints of the nodes where gpu workloads must be scheduled.
# Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
gpuWorkloadTaints: ""
# Taints of the nodes where production workloads must be scheduled.
# Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
productionWorkloadTaints: ""
# Dockerhub Secrets for OCDR images
# If you don't create one, it will be auto-created with default values.
dkubeDockerhubCredentialsSecret: "dkube-dockerhub-secret"
# AWS IAM role
# Valid only if KUBE_PROVIDER=eks
# This will be set as an annotation in a few deployments
# Format should be like:
# IAMRole: "<key>: <iam role>"
# eg: IAMRole: "iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole"
IAMRole: ""


@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# CRD README.md
templates/crds/README.md


@ -0,0 +1,19 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: External Secrets Operator
catalog.cattle.io/release-name: external-secrets-operator
apiVersion: v2
appVersion: v0.5.6
description: External secret management for Kubernetes
home: https://github.com/external-secrets/external-secrets
icon: https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png
keywords:
- kubernetes-external-secrets
- secrets
kubeVersion: '>= 1.19.0-0'
maintainers:
- email: kellinmcavoy@gmail.com
name: mcavoyk
name: external-secrets
type: application
version: 0.5.600


@ -0,0 +1,146 @@
# External Secrets
<p align="left"><img src="https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png" width="100x" /></p>
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.5.6](https://img.shields.io/badge/Version-0.5.6-informational?style=flat-square)
External secret management for Kubernetes
## TL;DR
```bash
helm repo add external-secrets https://charts.external-secrets.io
helm install external-secrets/external-secrets
```
## Installing the Chart
To install the chart with the release name `external-secrets`:
```bash
helm install external-secrets external-secrets/external-secrets
```
### Custom Resources
By default, the chart installs the external-secrets CRDs; this can be controlled with the `installCRDs` value.
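For example (a sketch reusing the repo and release name from the commands above), CRD installation can be skipped when the CRDs are managed separately:
```bash
# Install without the bundled CRDs; they must then be applied by some other means.
helm install external-secrets external-secrets/external-secrets --set installCRDs=false
```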
## Uninstalling the Chart
To uninstall the `external-secrets` deployment:
```bash
helm uninstall external-secrets
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| certController.affinity | object | `{}` | |
| certController.create | bool | `true` | Specifies whether a certificate controller deployment should be created. |
| certController.deploymentAnnotations | object | `{}` | Annotations to add to Deployment |
| certController.extraArgs | object | `{}` | |
| certController.extraEnv | list | `[]` | |
| certController.fullnameOverride | string | `""` | |
| certController.image.pullPolicy | string | `"IfNotPresent"` | |
| certController.image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | |
| certController.image.tag | string | `""` | |
| certController.imagePullSecrets | list | `[]` | |
| certController.nameOverride | string | `""` | |
| certController.nodeSelector | object | `{}` | |
| certController.podAnnotations | object | `{}` | Annotations to add to Pod |
| certController.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
| certController.podLabels | object | `{}` | |
| certController.podSecurityContext | object | `{}` | |
| certController.priorityClassName | string | `""` | Pod priority class name. |
| certController.prometheus.enabled | bool | `false` | deprecated. will be removed with 0.7.0, use serviceMonitor instead |
| certController.prometheus.service.port | int | `8080` | deprecated. will be removed with 0.7.0, use serviceMonitor instead |
| certController.rbac.create | bool | `true` | Specifies whether role and rolebinding resources should be created. |
| certController.replicaCount | int | `1` | |
| certController.requeueInterval | string | `"5m"` | |
| certController.resources | object | `{}` | |
| certController.securityContext | object | `{}` | |
| certController.serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| certController.serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| certController.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. |
| certController.serviceMonitor.additionalLabels | object | `{}` | Additional labels |
| certController.serviceMonitor.enabled | bool | `false` | Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics |
| certController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
| certController.serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval |
| certController.tolerations | list | `[]` | |
| concurrent | int | `1` | Specifies the number of concurrent ExternalSecret Reconciles external-secret executes at a time. |
| controllerClass | string | `""` | If set external secrets will filter matching Secret Stores with the appropriate controller values. |
| crds.createClusterExternalSecret | bool | `true` | If true, create CRDs for Cluster External Secret. |
| crds.createClusterSecretStore | bool | `true` | If true, create CRDs for Cluster Secret Store. |
| createOperator | bool | `true` | Specifies whether an external secret operator deployment should be created. |
| deploymentAnnotations | object | `{}` | Annotations to add to Deployment |
| extraArgs | object | `{}` | |
| extraEnv | list | `[]` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | |
| image.tag | string | `""` | The image tag to use. The default is the chart appVersion. |
| imagePullSecrets | list | `[]` | |
| installCRDs | bool | `true` | If set, install and upgrade CRDs through helm chart. |
| leaderElect | bool | `false` | If true, external-secrets will perform leader election between instances to ensure no more than one instance of external-secrets operates at a time. |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | Annotations to add to Pod |
| podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| priorityClassName | string | `""` | Pod priority class name. |
| processClusterExternalSecret | bool | `true` | If true, the operator will process cluster external secrets; otherwise, it will ignore them. |
| processClusterStore | bool | `true` | If true, the operator will process cluster stores; otherwise, it will ignore them. |
| prometheus.enabled | bool | `false` | deprecated. will be removed with 0.7.0, use serviceMonitor instead. |
| prometheus.service.port | int | `8080` | deprecated. will be removed with 0.7.0, use serviceMonitor instead. |
| rbac.create | bool | `true` | Specifies whether role and rolebinding resources should be created. |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| scopedNamespace | string | `""` | If set external secrets are only reconciled in the provided namespace |
| scopedRBAC | bool | `false` | Must be used with scopedNamespace. If true, create scoped RBAC roles under the scoped namespace and implicitly disable cluster stores and cluster external secrets |
| securityContext | object | `{}` | |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. |
| serviceMonitor.additionalLabels | object | `{}` | Additional labels |
| serviceMonitor.enabled | bool | `false` | Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics |
| serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
| serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval |
| tolerations | list | `[]` | |
| webhook.affinity | object | `{}` | |
| webhook.certCheckInterval | string | `"5m"` | |
| webhook.certDir | string | `"/tmp/certs"` | |
| webhook.create | bool | `true` | Specifies whether a webhook deployment should be created. |
| webhook.deploymentAnnotations | object | `{}` | Annotations to add to Deployment |
| webhook.extraArgs | object | `{}` | |
| webhook.extraEnv | list | `[]` | |
| webhook.failurePolicy | string | `"Fail"` | specifies whether validating webhooks should be created with failurePolicy: Fail or Ignore |
| webhook.fullnameOverride | string | `""` | |
| webhook.hostNetwork | bool | `false` | Specifies if webhook pod should use hostNetwork or not. |
| webhook.image.pullPolicy | string | `"IfNotPresent"` | |
| webhook.image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | |
| webhook.image.tag | string | `""` | The image tag to use. The default is the chart appVersion. |
| webhook.imagePullSecrets | list | `[]` | |
| webhook.nameOverride | string | `""` | |
| webhook.nodeSelector | object | `{}` | |
| webhook.podAnnotations | object | `{}` | Annotations to add to Pod |
| webhook.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
| webhook.podLabels | object | `{}` | |
| webhook.podSecurityContext | object | `{}` | |
| webhook.port | int | `10250` | The port the webhook will listen to |
| webhook.priorityClassName | string | `""` | Pod priority class name. |
| webhook.prometheus.enabled | bool | `false` | deprecated. will be removed with 0.7.0, use serviceMonitor instead |
| webhook.prometheus.service.port | int | `8080` | deprecated. will be removed with 0.7.0, use serviceMonitor instead |
| webhook.rbac.create | bool | `true` | Specifies whether role and rolebinding resources should be created. |
| webhook.replicaCount | int | `1` | |
| webhook.resources | object | `{}` | |
| webhook.secretAnnotations | object | `{}` | Annotations to add to Secret |
| webhook.securityContext | object | `{}` | |
| webhook.serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| webhook.serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| webhook.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. |
| webhook.serviceMonitor.additionalLabels | object | `{}` | Additional labels |
| webhook.serviceMonitor.enabled | bool | `false` | Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics |
| webhook.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
| webhook.serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval |
| webhook.tolerations | list | `[]` | |


@ -0,0 +1,36 @@
{{- $valuesYAML := "https://github.com/external-secrets/external-secrets/blob/master/deploy/charts/external-secrets/values.yaml" -}}
{{- $chartRepo := "https://charts.external-secrets.io" -}}
{{- $org := "external-secrets" -}}
# External Secrets
<p align="left"><img src="https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png" width="100x" /></p>
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
{{ template "chart.typeBadge" . }}{{ template "chart.versionBadge" . }}
{{ template "chart.description" . }}
## TL;DR
```bash
helm repo add {{ $org }} {{ $chartRepo }}
helm install {{ $org }}/{{ template "chart.name" . }}
```
## Installing the Chart
To install the chart with the release name `{{ template "chart.name" . }}`:
```bash
helm install {{ template "chart.name" . }} {{ $org }}/{{ template "chart.name" . }}
```
### Custom Resources
By default, the chart installs the external-secrets CRDs; this can be controlled with the `installCRDs` value.
## Uninstalling the Chart
To uninstall the `{{ template "chart.name" . }}` deployment:
```bash
helm uninstall {{ template "chart.name" . }}
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
{{ template "chart.valuesSection" . }}


@ -0,0 +1,7 @@
**External Secrets Operator** is a Kubernetes operator that integrates external secret management systems like [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/), [HashiCorp Vault](https://www.vaultproject.io/), [Google Secrets Manager](https://cloud.google.com/secret-manager), [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) and many more.
The operator reads information from external APIs and automatically injects the values into a [Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/).
### What is the goal of External Secrets Operator?
The goal of External Secrets Operator is to synchronize secrets from external APIs into Kubernetes. ESO is a collection of custom API resources (`ExternalSecret`, `SecretStore`, and `ClusterSecretStore`) that provide a user-friendly abstraction for the external API that stores the secrets and manages their lifecycle for you.
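As a rough illustration only (the `v1beta1` API version is assumed for this release, and the resource names, store name, and key path below are placeholders, not values defined by this chart), an `ExternalSecret` that syncs one value from an already-configured `SecretStore` into a Kubernetes Secret could look like this:

```bash
# Sketch: assumes a SecretStore named "my-secret-store" already exists and is
# configured for your provider (AWS Secrets Manager, Vault, etc.).
kubectl apply -f - <<'EOF'
apiVersion: external-secrets.io/v1beta1   # assumed API version for this release
kind: ExternalSecret
metadata:
  name: example
spec:
  refreshInterval: 1h              # how often the value is re-synced from the external API
  secretStoreRef:
    name: my-secret-store          # placeholder SecretStore
    kind: SecretStore
  target:
    name: example-secret           # Kubernetes Secret that ESO creates/updates
  data:
    - secretKey: password          # key in the resulting Kubernetes Secret
      remoteRef:
        key: path/to/remote/secret # placeholder key/path in the external store
EOF
```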


@ -0,0 +1,2 @@
image:
tag: main


@ -0,0 +1,8 @@
questions:
- variable: installCRDs
default: false
required: true
description: "If true, Install and upgrade CRDs through helm chart"
type: boolean
label: Install CRDs


@ -0,0 +1,13 @@
external-secrets has been deployed successfully!
In order to begin using ExternalSecrets, you will need to set up a SecretStore
or ClusterSecretStore resource (for example, by creating a 'vault' SecretStore).
More information on the different types of SecretStores and how to configure them
can be found in our Github: {{ .Chart.Home }}
{{ if .Values.prometheus.enabled -}}
deprecation warning:
> The flag `prometheus.enabled` is deprecated and will be removed in the next release.
Please migrate to using servicemonitor instead.
{{ end }}


@ -0,0 +1,110 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "external-secrets.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "external-secrets.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "external-secrets.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "external-secrets.labels" -}}
helm.sh/chart: {{ include "external-secrets.chart" . }}
{{ include "external-secrets.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{- define "external-secrets-webhook.labels" -}}
helm.sh/chart: {{ include "external-secrets.chart" . }}
{{ include "external-secrets-webhook.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{- define "external-secrets-cert-controller.labels" -}}
helm.sh/chart: {{ include "external-secrets.chart" . }}
{{ include "external-secrets-cert-controller.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "external-secrets.selectorLabels" -}}
app.kubernetes.io/name: {{ include "external-secrets.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "external-secrets-webhook.selectorLabels" -}}
app.kubernetes.io/name: {{ include "external-secrets.name" . }}-webhook
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "external-secrets-cert-controller.selectorLabels" -}}
app.kubernetes.io/name: {{ include "external-secrets.name" . }}-cert-controller
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "external-secrets.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "external-secrets.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "external-secrets-webhook.serviceAccountName" -}}
{{- if .Values.webhook.serviceAccount.create }}
{{- default "external-secrets-webhook" .Values.webhook.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.webhook.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "external-secrets-cert-controller.serviceAccountName" -}}
{{- if .Values.certController.serviceAccount.create }}
{{- default "external-secrets-cert-controller" .Values.certController.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.certController.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,94 @@
{{- if .Values.certController.create }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
{{- with .Values.certController.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.certController.replicaCount }}
selector:
matchLabels:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.certController.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 8 }}
{{- with .Values.certController.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.certController.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "external-secrets-cert-controller.serviceAccountName" . }}
{{- with .Values.certController.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: cert-controller
{{- with .Values.certController.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.certController.image.repository }}:{{ .Values.certController.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.certController.image.pullPolicy }}
args:
- certcontroller
- --crd-requeue-interval={{ .Values.certController.requeueInterval }}
- --service-name={{ include "external-secrets.fullname" . }}-webhook
- --service-namespace={{ .Release.Namespace }}
- --secret-name={{ include "external-secrets.fullname" . }}-webhook
- --secret-namespace={{ .Release.Namespace }}
{{- range $key, $value := .Values.certController.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
ports:
- containerPort: {{ .Values.certController.prometheus.service.port }}
protocol: TCP
name: metrics
readinessProbe:
httpGet:
port: 8081
path: /readyz
initialDelaySeconds: 20
periodSeconds: 5
{{- with .Values.certController.extraEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.certController.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.certController.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.certController.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.certController.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.certController.priorityClassName }}
priorityClassName: {{ .Values.certController.priorityClassName }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if and .Values.certController.create .Values.certController.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller-pdb
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
spec:
{{- if .Values.certController.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.certController.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.certController.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.certController.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,69 @@
{{- if and .Values.certController.create .Values.certController.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
rules:
- apiGroups:
- "apiextensions.k8s.io"
resources:
- "customresourcedefinitions"
verbs:
- "get"
- "list"
- "watch"
- "update"
- "patch"
- apiGroups:
- "admissionregistration.k8s.io"
resources:
- "validatingwebhookconfigurations"
verbs:
- "get"
- "list"
- "watch"
- "update"
- "patch"
- apiGroups:
- ""
resources:
- "endpoints"
verbs:
- "list"
- "get"
- "watch"
- apiGroups:
- ""
resources:
- "events"
verbs:
- "create"
- "patch"
- apiGroups:
- ""
resources:
- "secrets"
verbs:
- "get"
- "list"
- "watch"
- "update"
- "patch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "external-secrets.fullname" . }}-cert-controller
subjects:
- name: {{ include "external-secrets-cert-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
kind: ServiceAccount
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and .Values.certController.create .Values.certController.prometheus.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
annotations:
prometheus.io/path: "/metrics"
prometheus.io/scrape: "true"
prometheus.io/port: {{ .Values.certController.prometheus.service.port | quote }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.certController.prometheus.service.port }}
protocol: TCP
name: metrics
selector:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if and .Values.certController.create .Values.certController.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "external-secrets-cert-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
{{- with .Values.certController.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if and .Values.certController.create .Values.certController.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics
labels:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 4 }}
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
name: metrics
selector:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 4 }}
---
apiVersion: "monitoring.coreos.com/v1"
kind: ServiceMonitor
metadata:
labels:
{{- include "external-secrets-cert-controller.labels" . | nindent 4 }}
{{- if .Values.certController.serviceMonitor.additionalLabels }}
{{ toYaml .Values.certController.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics
namespace: {{ .Release.Namespace | quote }}
spec:
selector:
matchLabels:
{{- include "external-secrets-cert-controller.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
endpoints:
- port: metrics
interval: {{ .Values.certController.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.certController.serviceMonitor.scrapeTimeout }}
{{- end }}

View File

@ -0,0 +1,333 @@
{{- if and (.Values.installCRDs) (.Values.crds.createClusterExternalSecret) }}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: clusterexternalsecrets.external-secrets.io
spec:
group: external-secrets.io
names:
categories:
- externalsecrets
kind: ClusterExternalSecret
listKind: ClusterExternalSecretList
plural: clusterexternalsecrets
shortNames:
- ces
singular: clusterexternalsecret
scope: Cluster
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: ClusterExternalSecret is the Schema for the clusterexternalsecrets API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ClusterExternalSecretSpec defines the desired state of ClusterExternalSecret.
properties:
externalSecretName:
description: The name of the external secrets to be created defaults to the name of the ClusterExternalSecret
type: string
externalSecretSpec:
description: The spec for the ExternalSecrets to be created
properties:
data:
description: Data defines the connection between the Kubernetes Secret keys and the Provider data
items:
description: ExternalSecretData defines the connection between the Kubernetes Secret key (spec.data.<key>) and the Provider data.
properties:
remoteRef:
description: ExternalSecretDataRemoteRef defines Provider data location.
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
metadataPolicy:
description: Policy for fetching tags/labels from provider secrets, possible options are Fetch, None. Defaults to None
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
secretKey:
type: string
required:
- remoteRef
- secretKey
type: object
type: array
dataFrom:
description: DataFrom is used to fetch all properties from a specific Provider data If multiple entries are specified, the Secret keys are merged in the specified order
items:
maxProperties: 1
minProperties: 1
properties:
extract:
description: Used to extract multiple key/value pairs from one secret
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
metadataPolicy:
description: Policy for fetching tags/labels from provider secrets, possible options are Fetch, None. Defaults to None
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
find:
description: Used to find secrets based on tags or regular expressions
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
name:
description: Finds secrets based on the name.
properties:
regexp:
description: Finds secrets base
type: string
type: object
path:
description: A root path to start the find operations.
type: string
tags:
additionalProperties:
type: string
description: Find secrets based on tags.
type: object
type: object
type: object
type: array
refreshInterval:
default: 1h
description: RefreshInterval is the amount of time before the values are read again from the SecretStore provider Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" May be set to zero to fetch and create it once. Defaults to 1h.
type: string
secretStoreRef:
description: SecretStoreRef defines which SecretStore to fetch the ExternalSecret data.
properties:
kind:
description: Kind of the SecretStore resource (SecretStore or ClusterSecretStore) Defaults to `SecretStore`
type: string
name:
description: Name of the SecretStore resource
type: string
required:
- name
type: object
target:
description: ExternalSecretTarget defines the Kubernetes Secret to be created There can be only one target per ExternalSecret.
properties:
creationPolicy:
default: Owner
description: CreationPolicy defines rules on how to create the resulting Secret Defaults to 'Owner'
enum:
- Owner
- Orphan
- Merge
- None
type: string
deletionPolicy:
default: Retain
description: DeletionPolicy defines rules on how to delete the resulting Secret Defaults to 'Retain'
enum:
- Delete
- Merge
- Retain
type: string
immutable:
description: Immutable defines if the final secret will be immutable
type: boolean
name:
description: Name defines the name of the Secret resource to be managed This field is immutable Defaults to the .metadata.name of the ExternalSecret resource
type: string
template:
description: Template defines a blueprint for the created Secret resource.
properties:
data:
additionalProperties:
type: string
type: object
engineVersion:
default: v2
type: string
metadata:
description: ExternalSecretTemplateMetadata defines metadata fields for the Secret blueprint.
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
templateFrom:
items:
maxProperties: 1
minProperties: 1
properties:
configMap:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
secret:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
type: object
type: array
type:
type: string
type: object
type: object
required:
- secretStoreRef
type: object
namespaceSelector:
description: The labels to select by to find the Namespaces to create the ExternalSecrets in.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
refreshTime:
                description: The time in which the controller should reconcile its objects and recheck namespaces for labels.
type: string
required:
- externalSecretSpec
- namespaceSelector
type: object
status:
description: ClusterExternalSecretStatus defines the observed state of ClusterExternalSecret.
properties:
conditions:
items:
properties:
message:
type: string
status:
type: string
type:
type: string
required:
- status
- type
type: object
type: array
failedNamespaces:
description: Failed namespaces are the namespaces that failed to apply an ExternalSecret
items:
                  description: ClusterExternalSecretNamespaceFailure represents a failed namespace deployment and its reason.
properties:
namespace:
description: Namespace is the namespace that failed when trying to apply an ExternalSecret
type: string
reason:
description: Reason is why the ExternalSecret failed to apply to the namespace
type: string
required:
- namespace
type: object
type: array
provisionedNamespaces:
description: ProvisionedNamespaces are the namespaces where the ClusterExternalSecret has secrets
items:
type: string
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
conversion:
strategy: Webhook
webhook:
conversionReviewVersions:
- v1
clientConfig:
service:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
path: /convert
{{- end }}

View File

@ -0,0 +1,508 @@
{{- if .Values.installCRDs }}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: externalsecrets.external-secrets.io
spec:
group: external-secrets.io
names:
categories:
- externalsecrets
kind: ExternalSecret
listKind: ExternalSecretList
plural: externalsecrets
shortNames:
- es
singular: externalsecret
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.secretStoreRef.name
name: Store
type: string
- jsonPath: .spec.refreshInterval
name: Refresh Interval
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].reason
name: Status
type: string
deprecated: true
name: v1alpha1
schema:
openAPIV3Schema:
description: ExternalSecret is the Schema for the external-secrets API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ExternalSecretSpec defines the desired state of ExternalSecret.
properties:
data:
description: Data defines the connection between the Kubernetes Secret keys and the Provider data
items:
description: ExternalSecretData defines the connection between the Kubernetes Secret key (spec.data.<key>) and the Provider data.
properties:
remoteRef:
description: ExternalSecretDataRemoteRef defines Provider data location.
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
secretKey:
type: string
required:
- remoteRef
- secretKey
type: object
type: array
dataFrom:
description: DataFrom is used to fetch all properties from a specific Provider data If multiple entries are specified, the Secret keys are merged in the specified order
items:
description: ExternalSecretDataRemoteRef defines Provider data location.
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
type: array
refreshInterval:
default: 1h
description: RefreshInterval is the amount of time before the values are read again from the SecretStore provider Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" May be set to zero to fetch and create it once. Defaults to 1h.
type: string
secretStoreRef:
description: SecretStoreRef defines which SecretStore to fetch the ExternalSecret data.
properties:
kind:
description: Kind of the SecretStore resource (SecretStore or ClusterSecretStore) Defaults to `SecretStore`
type: string
name:
description: Name of the SecretStore resource
type: string
required:
- name
type: object
target:
description: ExternalSecretTarget defines the Kubernetes Secret to be created There can be only one target per ExternalSecret.
properties:
creationPolicy:
default: Owner
description: CreationPolicy defines rules on how to create the resulting Secret Defaults to 'Owner'
type: string
immutable:
description: Immutable defines if the final secret will be immutable
type: boolean
name:
description: Name defines the name of the Secret resource to be managed This field is immutable Defaults to the .metadata.name of the ExternalSecret resource
type: string
template:
description: Template defines a blueprint for the created Secret resource.
properties:
data:
additionalProperties:
type: string
type: object
engineVersion:
default: v1
description: EngineVersion specifies the template engine version that should be used to compile/execute the template specified in .data and .templateFrom[].
type: string
metadata:
description: ExternalSecretTemplateMetadata defines metadata fields for the Secret blueprint.
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
templateFrom:
items:
maxProperties: 1
minProperties: 1
properties:
configMap:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
secret:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
type: object
type: array
type:
type: string
type: object
type: object
required:
- secretStoreRef
- target
type: object
status:
properties:
conditions:
items:
properties:
lastTransitionTime:
format: date-time
type: string
message:
type: string
reason:
type: string
status:
type: string
type:
type: string
required:
- status
- type
type: object
type: array
refreshTime:
description: refreshTime is the time and date the external secret was fetched and the target secret updated
format: date-time
nullable: true
type: string
syncedResourceVersion:
description: SyncedResourceVersion keeps track of the last synced version
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.secretStoreRef.name
name: Store
type: string
- jsonPath: .spec.refreshInterval
name: Refresh Interval
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].reason
name: Status
type: string
name: v1beta1
schema:
openAPIV3Schema:
description: ExternalSecret is the Schema for the external-secrets API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ExternalSecretSpec defines the desired state of ExternalSecret.
properties:
data:
description: Data defines the connection between the Kubernetes Secret keys and the Provider data
items:
description: ExternalSecretData defines the connection between the Kubernetes Secret key (spec.data.<key>) and the Provider data.
properties:
remoteRef:
description: ExternalSecretDataRemoteRef defines Provider data location.
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
metadataPolicy:
description: Policy for fetching tags/labels from provider secrets, possible options are Fetch, None. Defaults to None
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
secretKey:
type: string
required:
- remoteRef
- secretKey
type: object
type: array
dataFrom:
description: DataFrom is used to fetch all properties from a specific Provider data If multiple entries are specified, the Secret keys are merged in the specified order
items:
maxProperties: 1
minProperties: 1
properties:
extract:
description: Used to extract multiple key/value pairs from one secret
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
key:
description: Key is the key used in the Provider, mandatory
type: string
metadataPolicy:
description: Policy for fetching tags/labels from provider secrets, possible options are Fetch, None. Defaults to None
type: string
property:
description: Used to select a specific property of the Provider value (if a map), if supported
type: string
version:
description: Used to select a specific version of the Provider value, if supported
type: string
required:
- key
type: object
find:
description: Used to find secrets based on tags or regular expressions
properties:
conversionStrategy:
default: Default
description: Used to define a conversion Strategy
type: string
name:
description: Finds secrets based on the name.
properties:
regexp:
description: Finds secrets base
type: string
type: object
path:
description: A root path to start the find operations.
type: string
tags:
additionalProperties:
type: string
description: Find secrets based on tags.
type: object
type: object
type: object
type: array
refreshInterval:
default: 1h
description: RefreshInterval is the amount of time before the values are read again from the SecretStore provider Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" May be set to zero to fetch and create it once. Defaults to 1h.
type: string
secretStoreRef:
description: SecretStoreRef defines which SecretStore to fetch the ExternalSecret data.
properties:
kind:
description: Kind of the SecretStore resource (SecretStore or ClusterSecretStore) Defaults to `SecretStore`
type: string
name:
description: Name of the SecretStore resource
type: string
required:
- name
type: object
target:
description: ExternalSecretTarget defines the Kubernetes Secret to be created There can be only one target per ExternalSecret.
properties:
creationPolicy:
default: Owner
description: CreationPolicy defines rules on how to create the resulting Secret Defaults to 'Owner'
enum:
- Owner
- Orphan
- Merge
- None
type: string
deletionPolicy:
default: Retain
description: DeletionPolicy defines rules on how to delete the resulting Secret Defaults to 'Retain'
enum:
- Delete
- Merge
- Retain
type: string
immutable:
description: Immutable defines if the final secret will be immutable
type: boolean
name:
description: Name defines the name of the Secret resource to be managed This field is immutable Defaults to the .metadata.name of the ExternalSecret resource
type: string
template:
description: Template defines a blueprint for the created Secret resource.
properties:
data:
additionalProperties:
type: string
type: object
engineVersion:
default: v2
type: string
metadata:
description: ExternalSecretTemplateMetadata defines metadata fields for the Secret blueprint.
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
templateFrom:
items:
maxProperties: 1
minProperties: 1
properties:
configMap:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
secret:
properties:
items:
items:
properties:
key:
type: string
required:
- key
type: object
type: array
name:
type: string
required:
- items
- name
type: object
type: object
type: array
type:
type: string
type: object
type: object
required:
- secretStoreRef
type: object
status:
properties:
conditions:
items:
properties:
lastTransitionTime:
format: date-time
type: string
message:
type: string
reason:
type: string
status:
type: string
type:
type: string
required:
- status
- type
type: object
type: array
refreshTime:
description: refreshTime is the time and date the external secret was fetched and the target secret updated
format: date-time
nullable: true
type: string
syncedResourceVersion:
description: SyncedResourceVersion keeps track of the last synced version
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
conversion:
strategy: Webhook
webhook:
conversionReviewVersions:
- v1
clientConfig:
service:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
path: /convert
{{- end }}

View File

@ -0,0 +1,107 @@
{{- if .Values.createOperator }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "external-secrets.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
{{- with .Values.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "external-secrets.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "external-secrets.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "external-secrets.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or (.Values.leaderElect) (.Values.scopedNamespace) (.Values.processClusterStore) (.Values.processClusterExternalSecret) (.Values.concurrent) (.Values.extraArgs) }}
args:
{{- if .Values.leaderElect }}
- --enable-leader-election=true
{{- end }}
{{- if .Values.scopedNamespace }}
- --namespace={{ .Values.scopedNamespace }}
{{- end }}
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
- --enable-cluster-store-reconciler=false
- --enable-cluster-external-secret-reconciler=false
{{- else }}
{{- if not .Values.processClusterStore }}
- --enable-cluster-store-reconciler=false
{{- end }}
{{- if not .Values.processClusterExternalSecret }}
- --enable-cluster-external-secret-reconciler=false
{{- end }}
{{- end }}
{{- if .Values.controllerClass }}
- --controller-class={{ .Values.controllerClass }}
{{- end }}
{{- if .Values.concurrent }}
- --concurrent={{ .Values.concurrent }}
{{- end }}
{{- range $key, $value := .Values.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
{{- end }}
ports:
- containerPort: {{ .Values.prometheus.service.port }}
protocol: TCP
name: metrics
{{- with .Values.extraEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "external-secrets.fullname" . }}-pdb
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "external-secrets.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,227 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
name: {{ include "external-secrets.fullname" . }}-controller
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
namespace: {{ .Values.scopedNamespace | quote }}
{{- end }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
rules:
- apiGroups:
- "external-secrets.io"
resources:
- "secretstores"
- "clustersecretstores"
- "externalsecrets"
- "clusterexternalsecrets"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- "external-secrets.io"
resources:
- "externalsecrets"
- "externalsecrets/status"
- "externalsecrets/finalizers"
- "secretstores"
- "secretstores/status"
- "secretstores/finalizers"
- "clustersecretstores"
- "clustersecretstores/status"
- "clustersecretstores/finalizers"
- "clusterexternalsecrets"
- "clusterexternalsecrets/status"
- "clusterexternalsecrets/finalizers"
verbs:
- "update"
- "patch"
- apiGroups:
- ""
resources:
- "serviceaccounts"
- "namespaces"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- ""
resources:
- "configmaps"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- ""
resources:
- "secrets"
verbs:
- "get"
- "list"
- "watch"
- "create"
- "update"
- "delete"
- "patch"
- apiGroups:
- ""
resources:
- "serviceaccounts/token"
verbs:
- "create"
- apiGroups:
- ""
resources:
- "events"
verbs:
- "create"
- "patch"
- apiGroups:
- "external-secrets.io"
resources:
- "externalsecrets"
verbs:
- "create"
- "update"
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
name: {{ include "external-secrets.fullname" . }}-view
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
namespace: {{ .Values.scopedNamespace | quote }}
{{- end }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- "external-secrets.io"
resources:
- "externalsecrets"
- "secretstores"
- "clustersecretstores"
verbs:
- "get"
- "watch"
- "list"
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
name: {{ include "external-secrets.fullname" . }}-edit
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
namespace: {{ .Values.scopedNamespace | quote }}
{{- end }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- "external-secrets.io"
resources:
- "externalsecrets"
- "secretstores"
- "clustersecretstores"
verbs:
- "create"
- "delete"
- "deletecollection"
- "patch"
- "update"
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
kind: RoleBinding
{{- else }}
kind: ClusterRoleBinding
{{- end }}
metadata:
name: {{ include "external-secrets.fullname" . }}-controller
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
namespace: {{ .Values.scopedNamespace | quote }}
{{- end }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if and .Values.scopedNamespace .Values.scopedRBAC }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
name: {{ include "external-secrets.fullname" . }}-controller
subjects:
- name: {{ include "external-secrets.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "external-secrets.fullname" . }}-leaderelection
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- "configmaps"
resourceNames:
- "external-secrets-controller"
verbs:
- "get"
- "update"
- "patch"
- apiGroups:
- ""
resources:
- "configmaps"
verbs:
- "create"
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "get"
- "create"
- "update"
- "patch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "external-secrets.fullname" . }}-leaderelection
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "external-secrets.fullname" . }}-leaderelection
subjects:
- kind: ServiceAccount
name: {{ include "external-secrets.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if .Values.prometheus.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-metrics
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
annotations:
prometheus.io/path: "/metrics"
prometheus.io/scrape: "true"
prometheus.io/port: {{ .Values.prometheus.service.port | quote }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.prometheus.service.port }}
protocol: TCP
name: metrics
selector:
{{- include "external-secrets.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "external-secrets.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,39 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-metrics
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets.selectorLabels" . | nindent 4 }}
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
name: metrics
selector:
{{- include "external-secrets.selectorLabels" . | nindent 4 }}
---
apiVersion: "monitoring.coreos.com/v1"
kind: ServiceMonitor
metadata:
labels:
{{- include "external-secrets.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
name: {{ include "external-secrets.fullname" . }}-metrics
namespace: {{ .Release.Namespace | quote }}
spec:
selector:
matchLabels:
{{- include "external-secrets.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
endpoints:
- port: metrics
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}

View File

@ -0,0 +1,64 @@
{{- if .Values.webhook.create }}
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: secretstore-validate
labels:
external-secrets.io/component: webhook
webhooks:
- name: "validate.secretstore.external-secrets.io"
rules:
- apiGroups: ["external-secrets.io"]
apiVersions: ["v1beta1"]
operations: ["CREATE", "UPDATE", "DELETE"]
resources: ["secretstores"]
scope: "Namespaced"
clientConfig:
service:
namespace: {{ .Release.Namespace | quote }}
name: {{ include "external-secrets.fullname" . }}-webhook
path: /validate-external-secrets-io-v1beta1-secretstore
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
timeoutSeconds: 5
- name: "validate.clustersecretstore.external-secrets.io"
rules:
- apiGroups: ["external-secrets.io"]
apiVersions: ["v1beta1"]
operations: ["CREATE", "UPDATE", "DELETE"]
resources: ["clustersecretstores"]
scope: "Cluster"
clientConfig:
service:
namespace: {{ .Release.Namespace | quote }}
name: {{ include "external-secrets.fullname" . }}-webhook
path: /validate-external-secrets-io-v1beta1-clustersecretstore
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
timeoutSeconds: 5
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: externalsecret-validate
labels:
external-secrets.io/component: webhook
webhooks:
- name: "validate.externalsecret.external-secrets.io"
rules:
- apiGroups: ["external-secrets.io"]
apiVersions: ["v1beta1"]
operations: ["CREATE", "UPDATE", "DELETE"]
resources: ["externalsecrets"]
scope: "Namespaced"
clientConfig:
service:
namespace: {{ .Release.Namespace | quote }}
name: {{ include "external-secrets.fullname" . }}-webhook
path: /validate-external-secrets-io-v1beta1-externalsecret
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
timeoutSeconds: 5
  failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- end }}

View File

@ -0,0 +1,105 @@
{{- if .Values.webhook.create }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
{{- with .Values.webhook.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.webhook.replicaCount }}
selector:
matchLabels:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.webhook.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 8 }}
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.webhook.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
      hostNetwork: {{ .Values.webhook.hostNetwork }}
serviceAccountName: {{ include "external-secrets-webhook.serviceAccountName" . }}
{{- with .Values.webhook.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: webhook
{{- with .Values.webhook.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.webhook.image.repository }}:{{ .Values.webhook.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
args:
- webhook
- --port={{ .Values.webhook.port }}
- --dns-name={{ include "external-secrets.fullname" . }}-webhook.{{ .Release.Namespace }}.svc
- --cert-dir={{ .Values.webhook.certDir }}
- --check-interval={{ .Values.webhook.certCheckInterval }}
{{- range $key, $value := .Values.webhook.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
ports:
- containerPort: {{ .Values.webhook.prometheus.service.port }}
protocol: TCP
name: metrics
- containerPort: {{ .Values.webhook.port }}
protocol: TCP
name: webhook
readinessProbe:
httpGet:
port: 8081
path: /readyz
initialDelaySeconds: 20
periodSeconds: 5
{{- with .Values.webhook.extraEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.webhook.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: certs
mountPath: {{ .Values.webhook.certDir }}
readOnly: true
volumes:
- name: certs
secret:
secretName: {{ include "external-secrets.fullname" . }}-webhook
{{- with .Values.webhook.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.webhook.priorityClassName }}
priorityClassName: {{ .Values.webhook.priorityClassName }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and .Values.webhook.create .Values.webhook.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "external-secrets.fullname" . }}-webhook-pdb
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
    external-secrets.io/component: webhook
spec:
{{- if .Values.webhook.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.webhook.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.webhook.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.webhook.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,14 @@
{{- if .Values.webhook.create }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
    external-secrets.io/component: webhook
{{- with .Values.webhook.secretAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,31 @@
{{- if .Values.webhook.create }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
    external-secrets.io/component: webhook
  {{- if .Values.webhook.prometheus.enabled }}
  annotations:
    prometheus.io/path: "/metrics"
    prometheus.io/scrape: "true"
    prometheus.io/port: {{ .Values.webhook.prometheus.service.port | quote }}
{{- end }}
spec:
type: ClusterIP
ports:
- port: 443
targetPort: {{ .Values.webhook.port }}
protocol: TCP
name: webhook
  {{- if .Values.webhook.prometheus.enabled }}
  - port: {{ .Values.webhook.prometheus.service.port }}
    targetPort: {{ .Values.webhook.prometheus.service.port }}
protocol: TCP
name: metrics
{{- end }}
selector:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if and .Values.webhook.create .Values.webhook.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "external-secrets-webhook.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
{{- with .Values.webhook.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if and .Values.webhook.create .Values.webhook.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "external-secrets.fullname" . }}-webhook-metrics
labels:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 4 }}
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
name: metrics
selector:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 4 }}
---
apiVersion: "monitoring.coreos.com/v1"
kind: ServiceMonitor
metadata:
labels:
{{- include "external-secrets-webhook.labels" . | nindent 4 }}
{{- if .Values.webhook.serviceMonitor.additionalLabels }}
{{ toYaml .Values.webhook.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
name: {{ include "external-secrets.fullname" . }}-webhook-metrics
namespace: {{ .Release.Namespace | quote }}
spec:
selector:
matchLabels:
{{- include "external-secrets-webhook.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
endpoints:
- port: metrics
interval: {{ .Values.webhook.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.webhook.serviceMonitor.scrapeTimeout }}
{{- end }}

View File

@ -0,0 +1,313 @@
replicaCount: 1
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: IfNotPresent
# -- The image tag to use. The default is the chart appVersion.
tag: ""
# -- If set, install and upgrade CRDs through helm chart.
installCRDs: true
crds:
# -- If true, create CRDs for Cluster External Secret.
createClusterExternalSecret: true
# -- If true, create CRDs for Cluster Secret Store.
createClusterSecretStore: true
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- If true, external-secrets will perform leader election between instances to ensure no more
# than one instance of external-secrets operates at a time.
leaderElect: false
# -- If set, external secrets will filter matching
# Secret Stores with the appropriate controller values.
controllerClass: ""
# -- If set, external secrets are only reconciled in the
# provided namespace.
scopedNamespace: ""
# -- Must be used with scopedNamespace. If true, create scoped RBAC roles under the scoped namespace
# and implicitly disable cluster stores and cluster external secrets
scopedRBAC: false
# -- If true, the operator will process cluster external secrets; otherwise, it will ignore them.
processClusterExternalSecret: true
# -- If true, the operator will process cluster stores; otherwise, it will ignore them.
processClusterStore: true
# -- Specifies whether an external secret operator deployment should be created.
createOperator: true
# -- Specifies the number of concurrent ExternalSecret reconciles that external-secrets executes
# at a time.
concurrent: 1
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Annotations to add to the service account.
annotations: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead.
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead.
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
nodeSelector: {}
tolerations: []
affinity: {}
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
webhook:
  # -- Specifies whether a webhook deployment should be created.
create: true
certCheckInterval: "5m"
replicaCount: 1
certDir: /tmp/certs
# -- specifies whether validating webhooks should be created with failurePolicy: Fail or Ignore
failurePolicy: Fail
# -- Specifies if webhook pod should use hostNetwork or not.
hostNetwork: false
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: IfNotPresent
# -- The image tag to use. The default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- The port the webhook will listen to
port: 10250
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Annotations to add to the service account.
annotations: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
nodeSelector: {}
tolerations: []
affinity: {}
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
# -- Annotations to add to Secret
secretAnnotations: {}
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
certController:
  # -- Specifies whether a certificate controller deployment should be created.
create: true
requeueInterval: "5m"
replicaCount: 1
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: IfNotPresent
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Annotations to add to the service account.
annotations: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
nodeSelector: {}
tolerations: []
affinity: {}
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# Helm files
OWNERS

View File

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: TrilioVault for Kubernetes Operator
catalog.cattle.io/release-name: k8s-triliovault-operator
apiVersion: v2
appVersion: 2.9.3
dependencies:
- condition: observability.enabled
name: observability
repository: file://./charts/observability
description: K8s-TrilioVault-Operator is an operator designed to manage the K8s-TrilioVault
Application Lifecycle.
home: https://github.com/trilioData/k8s-triliovault-operator
icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png
kubeVersion: '>=1.19.0-0'
maintainers:
- email: prafull.ladha@trilio.io
name: prafull11
name: k8s-triliovault-operator
sources:
- https://github.com/trilioData/k8s-triliovault-operator
version: 2.9.300

View File

@ -0,0 +1 @@
# Placeholder for the License if we decide to provide one

View File

@ -0,0 +1,202 @@
# K8s-TrilioVault-Operator
This operator manages the lifecycle of the TrilioVault Backup/Recovery solution. It installs, updates, and manages the TrilioVault application.
## Introduction
## Prerequisites
- Kubernetes 1.19+
- PV provisioner support
- CSI driver should be installed
### One Click Installation
In the one-click install of the upstream operator, a cluster-scoped TVM custom resource named `triliovault-manager` is created.
```shell script
helm repo add trilio-vault-operator https://charts.k8strilio.net/trilio-stable/k8s-triliovault-operator
helm install tvm trilio-vault-operator/k8s-triliovault-operator
```
#### One click install with preflight Configuration
The following table lists the configuration parameters of the upstream operator's one-click install feature as well as the preflight check flags, their default values, and usage. An example install command that sets some of these values is shown after the table.
| Parameter | Description | Default | Example |
|--------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|------------|-------------------------|
| `installTVK.enabled` | 1 click install feature is enabled | true | |
| `installTVK.applicationScope` | scope of TVK application created | Cluster | |
| `installTVK.tvkInstanceName` | tvk instance name | "" | "tvk-instance" |
| `installTVK.ingressConfig.host` | host of the ingress resource created | "" | |
| `installTVK.ingressConfig.tlsSecretName` | tls secret name which contains ingress certs | "" | |
| `installTVK.ingressConfig.annotations` | annotations to be added on ingress resource | "" | |
| `installTVK.ingressConfig.ingressClass` | ingress class name for the ingress resource | "" | |
| `installTVK.ComponentConfiguration.ingressController.enabled` | TVK ingress controller should be deployed | true | |
| `installTVK.ComponentConfiguration.ingressController.service.type` | TVK ingress controller service type | "NodePort" | |
| `preflight.enabled` | enables preflight check for tvk | false | |
| `preflight.storageClass` | Name of storage class to use for preflight checks (Required) | "" | |
| `preflight.cleanupOnFailure` | Cleanup the resources on cluster if preflight checks fail (Optional) | false | |
| `preflight.imagePullSecret` | Name of the secret for authentication while pulling the images from the local registry (Optional) | "" | |
| `preflight.limits` | Pod memory and cpu resource limits for DNS and volume snapshot preflight check (Optional) | "" | "cpu=600m,memory=256Mi" |
| `preflight.localRegistry` | Name of the local registry from where the images will be pulled (Optional) | "" | |
| `preflight.nodeSelector` | Node selector labels for pods to schedule on a specific nodes of cluster (Optional) | "" | "key=value" |
| `preflight.pvcStorageRequest` | PVC storage request for volume snapshot preflight check (Optional) | "" | "2Gi" |
| `preflight.requests` | Pod memory and cpu resource requests for DNS and volume snapshot preflight check (Optional) | "" | "cpu=300m,memory=128Mi" |
| `preflight.volumeSnapshotClass` | Name of volume snapshot class to use for preflight checks (Optional) | "" | |
| `preflight.logLevel` | Log Level for the preflight run (Default: "INFO") | "" | |
| `preflight.imageTag` | Image tag to use for the preflight image (Default: latest) | "" | |
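For example, a one-click install with the preflight check enabled might look like the following; the storage class and ingress host values are illustrative placeholders, not defaults:
```shell script
helm install tvm trilio-vault-operator/k8s-triliovault-operator \
  --set preflight.enabled=true \
  --set preflight.storageClass=<storage-class-name> \
  --set installTVK.ingressConfig.host=<ingress-host>
```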
Check the TVM CR configuration by running the following command:
```
kubectl get triliovaultmanagers.triliovault.trilio.io triliovault-manager -o yaml
```
Once the operator pod is in the running state, the TVK pods start getting spawned. Confirm that the [TVK pods are up](#Check-TVK-Install).
#### Note:
If the preflight check is enabled and the helm install fails, check the pre-install helm hook pod logs for any preflight check failure. Follow these steps:
First, run this command:
```
kubectl get pods -n <helm-release-namespace>
```
The pod name should start with `<helm-release-name>-preflight-job-preinstall-hook`. Check the logs of the pod with the following command:
```
kubectl logs -f <pod-name> -n <helm-release-namespace>
```
#### The failed preflight job is not cleaned up automatically right after failure. On clusters running Kubernetes 1.21 and above, the job is cleaned up after 1 hour, so collect any failure logs within 1 hour of the job failing. On clusters below 1.21, the failed preflight job has to be cleaned up manually.
To delete the job manually, run the following command:
```
kubectl delete job <job-name> -n <helm-release-namespace>
```
where the job name should also start with `<helm-release-name>-preflight-job-preinstall-hook`.
Also, due to a bug on the Helm side where auto-deletion of resources upon failure doesn't work, the user needs to clean up the resources left behind before preflight can be run again; once the bug is fixed upstream, this step will be handled automatically. Run the following commands to clean up the temporary resources:
1. Cleanup Service Account:
```
kubectl delete sa <helm-release-name>-preflight-service-account -n <helm-release-namespace>
```
2. Cleanup Cluster Role Binding:
```
kubectl delete clusterrolebinding <helm-release-name>-<helm-release-namespace>-preflight-rolebinding
```
3. Cleanup Cluster Role:
```
kubectl delete clusterrole <helm-release-name>-<helm-release-namespace>-preflight-role
```
## Manual Installation
To install the operator on a local setup, install the latest helm chart from this repo:
```shell script
helm repo add trilio-vault-operator https://charts.k8strilio.net/trilio-stable/k8s-triliovault-operator
helm install tvm trilio-vault-operator/k8s-triliovault-operator
```
Now, create a TrilioVaultManager CR to install TrilioVault for Kubernetes. You can provide custom configurations for the TVK resources as follows:
```
apiVersion: triliovault.trilio.io/v1
kind: TrilioVaultManager
metadata:
labels:
triliovault: k8s
name: tvk
spec:
trilioVaultAppVersion: latest
applicationScope: Cluster
# User can configure tvk instance name
tvkInstanceName: tvk-instance
# User can configure the ingress hosts, annotations and TLS secret through the ingressConfig section
ingressConfig:
host: "trilio.co.in"
tlsSecretName: "secret-name"
  # TVK components configuration; currently supports control-plane, web, exporter, web-backend, ingress-controller, admission-webhook.
  # User can configure resources for all components, and can configure the service type and host for the ingress-controller
componentConfiguration:
web-backend:
resources:
requests:
memory: "400Mi"
cpu: "200m"
limits:
memory: "2584Mi"
cpu: "1000m"
ingress-controller:
enabled: true
service:
type: LoadBalancer
```
### Apply the Custom Resource
Apply `TVM.yaml`:
```shell
kubectl create -f TVM.yaml
```
### Check TVK Install
Check that the pods were created:
```
kubectl get pods
```
```
NAME                                                         READY   STATUS    RESTARTS   AGE
k8s-triliovault-admission-webhook-6ff5f98c8-qwmfc            1/1     Running   0          81s
k8s-triliovault-backend-6f66b6b8d5-gxtmz                     1/1     Running   0          81s
k8s-triliovault-control-plane-6c464c5d78-ftk6g               1/1     Running   0          81s
k8s-triliovault-exporter-59566f97dd-gs4xc                    1/1     Running   0          81s
k8s-triliovault-ingress-nginx-controller-867c764cd5-qhpx6    1/1     Running   0          18s
k8s-triliovault-web-967c8475-m7pc6                           1/1     Running   0          81s
tvm-k8s-triliovault-operator-66bd7d86d5-dvhzb                1/1     Running   0          6m48s
```
Check that the ingress controller service is of type LoadBalancer:
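For example, list the services (add `-n <namespace>` if TVK was installed into a specific namespace):
```
kubectl get svc
```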
```
NAME                                                 TYPE           CLUSTER-IP     EXTERNAL-IP      PORT(S)                      AGE
k8s-triliovault-admission-webhook                    ClusterIP      10.7.243.24    <none>           443/TCP                      129m
k8s-triliovault-ingress-nginx-controller             LoadBalancer   10.7.246.193   35.203.155.148   80:30362/TCP,443:32327/TCP   129m
k8s-triliovault-ingress-nginx-controller-admission   ClusterIP      10.7.250.31    <none>           443/TCP                      129m
k8s-triliovault-web                                  ClusterIP      10.7.254.41    <none>           80/TCP                       129m
k8s-triliovault-web-backend                          ClusterIP      10.7.252.146   <none>           80/TCP                       129m
tvm-k8s-triliovault-operator-webhook-service         ClusterIP      10.7.248.163   <none>           443/TCP                      130m
```
Check that the ingress resource has the host defined by the user:
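For example (again, add `-n <namespace>` if needed):
```
kubectl get ingress
```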
```
NAME              CLASS                           HOSTS   ADDRESS          PORTS   AGE
k8s-triliovault   k8s-triliovault-default-nginx   *       35.203.155.148   80      129m
```
You can access the TVK UI by hitting this address in your browser: https://35.203.155.148
## Delete
```shell
kubectl delete -f TVM.yaml
```
## Uninstall
To uninstall/delete the operator helm chart:
```bash
helm uninstall tvm
```
## TrilioVaultManager compatibility
We maintain version parity between the TrilioVaultManager (upstream operator) and TrilioVault for Kubernetes. Whenever you upgrade to a new version, use the same version for the upstream operator and TrilioVault for Kubernetes.
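For example, an upgrade to a matching chart version might look like the following; the version value is an illustrative placeholder:
```bash
helm repo update
helm upgrade tvm trilio-vault-operator/k8s-triliovault-operator --version <matching-version>
```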

View File

@ -0,0 +1,21 @@
apiVersion: v2
appVersion: 0.1.0
dependencies:
- name: visualization
repository: file://charts/visualization
version: ^0.1.0
- name: logging
repository: file://charts/logging
version: ^0.1.0
- name: monitoring
repository: file://charts/monitoring
version: ^0.1.0
description: Observability Stack is designed to manage the K8s-TrilioVault Application's
Logging, Monitoring and Visualization.
icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png
kubeVersion: '>=1.19.0-0'
maintainers:
- email: support@trilio.io
name: Trilio
name: observability
version: 0.1.0

View File

@ -0,0 +1,18 @@
apiVersion: v2
appVersion: 0.1.0
dependencies:
- condition: loki.enabled
name: loki
repository: https://grafana.github.io/helm-charts
version: ^2.11.1
- condition: promtail.enabled
name: promtail
repository: https://grafana.github.io/helm-charts
version: ^4.2.0
description: Logging Stack designed to manage the K8s-TrilioVault Application's Logs.
icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png
maintainers:
- email: support@trilio.io
name: Trilio
name: logging
version: 0.1.0

View File

@ -0,0 +1,13 @@
apiVersion: v1
appVersion: v2.5.0
description: 'Loki: like Prometheus, but for logs.'
home: https://grafana.com/loki
icon: https://raw.githubusercontent.com/grafana/loki/master/docs/sources/logo.png
kubeVersion: ^1.10.0-0
maintainers:
- email: support@trilio.io
name: Trilio
name: loki
sources:
- https://github.com/grafana/loki
version: 2.11.1

View File

@ -0,0 +1,3 @@
Verify the application is working by running these commands:
kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "loki.fullname" . }} {{ .Values.service.port }}
curl http://127.0.0.1:{{ .Values.service.port }}/api/prom/label

View File

@ -0,0 +1,75 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loki.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loki.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loki.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account
*/}}
{{- define "loki.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "loki.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the app name of loki clients. Defaults to the same logic as "loki.fullname", and default client expects "promtail".
*/}}
{{- define "client.name" -}}
{{- if .Values.client.name -}}
{{- .Values.client.name -}}
{{- else if .Values.client.fullnameOverride -}}
{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default "promtail" .Values.client.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate a right Ingress apiVersion
*/}}
{{- define "ingress.apiVersion" -}}
{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.GitVersion -}}
networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
networking.k8s.io/v1beta1
{{- else -}}
extensions/v1beta1
{{- end }}
{{- end -}}

View File

@ -0,0 +1,17 @@
{{- if or (.Values.useExistingAlertingGroup.enabled) (gt (len .Values.alerting_groups) 0) }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "loki.fullname" . }}-alerting-rules
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{ template "loki.fullname" . }}-alerting-rules.yaml: |-
groups:
{{- toYaml .Values.alerting_groups | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,55 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "loki.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- $apiVersion := include "ingress.apiVersion" . -}}
apiVersion: {{ $apiVersion }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
{{- if eq $apiVersion "networking.k8s.io/v1" }}
pathType: Prefix
{{- end }}
backend:
{{- if eq $apiVersion "networking.k8s.io/v1" }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
podSelector:
matchLabels:
name: {{ template "loki.fullname" . }}
app: {{ template "loki.name" . }}
release: {{ .Release.Name }}
ingress:
- from:
- podSelector:
matchLabels:
app: {{ template "client.name" . }}
release: {{ .Release.Name }}
      ports:
        - port: {{ .Values.service.port }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ template "loki.chart" . }}
spec:
selector:
matchLabels:
app: {{ template "loki.name" . }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end }}

View File

@ -0,0 +1,41 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "loki.fullname" . }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
privileged: false
allowPrivilegeEscalation: false
volumes:
- 'configMap'
- 'emptyDir'
- 'persistentVolumeClaim'
- 'secret'
- 'projected'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
{{- end }}

View File

@ -0,0 +1,23 @@
{{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.prometheusRule.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "loki.fullname" . }}
{{- if .Values.serviceMonitor.prometheusRule.namespace }}
namespace: {{ .Values.serviceMonitor.prometheusRule.namespace | quote }}
{{- end }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.serviceMonitor.prometheusRule.additionalLabels }}
{{- toYaml .Values.serviceMonitor.prometheusRule.additionalLabels | nindent 4 }}
{{- end }}
spec:
{{- if .Values.serviceMonitor.prometheusRule.rules }}
groups:
- name: {{ template "loki.fullname" . }}
rules: {{- toYaml .Values.serviceMonitor.prometheusRule.rules | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.rbac.pspEnabled }}
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "loki.fullname" . }}]
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "loki.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "loki.serviceAccountName" . }}
{{- end }}

View File

@ -0,0 +1,14 @@
{{- if not .Values.config.existingSecret -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
loki.yaml: {{ tpl (toYaml .Values.config) . | b64enc}}
{{- end -}}

View File

@ -0,0 +1,26 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "loki.fullname" . }}-headless
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app.kubernetes.io/instance: {{ template "loki.name" . }}
{{- include "k8s-triliovault-operator.labels" . | nindent 4 }}
variant: headless
spec:
clusterIP: None
ports:
- port: {{ .Values.service.port }}
protocol: TCP
name: http-metrics
targetPort: {{ .Values.service.targetPort }}
{{- if .Values.extraPorts }}
{{ toYaml .Values.extraPorts | indent 4}}
{{- end }}
selector:
app: {{ template "loki.name" . }}
release: {{ .Release.Name }}

View File

@ -0,0 +1,45 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app.kubernetes.io/instance: {{ template "loki.name" . }}
{{- include "k8s-triliovault-operator.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if (and (eq .Values.service.type "ClusterIP") (not (empty .Values.service.clusterIP))) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
protocol: TCP
name: http-metrics
targetPort: {{ .Values.service.targetPort }}
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraPorts }}
{{ toYaml .Values.extraPorts | indent 4}}
{{- end }}
selector:
app: {{ template "loki.name" . }}
release: {{ .Release.Name }}

View File

@ -0,0 +1,16 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations:
{{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
name: {{ template "loki.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "loki.fullname" . }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
{{- if .Values.serviceMonitor.annotations }}
annotations:
{{ toYaml .Values.serviceMonitor.annotations | indent 4 }}
{{- end }}
spec:
selector:
matchLabels:
app: {{ template "loki.name" . }}
release: {{ .Release.Name | quote }}
variant: headless
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
endpoints:
- port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- if .Values.serviceMonitor.path }}
path: {{ .Values.serviceMonitor.path }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,160 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "loki.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "loki.name" . }}
chart: {{ template "loki.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "k8s-triliovault-operator.labels" . | nindent 4 }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
spec:
podManagementPolicy: {{ .Values.podManagementPolicy }}
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ template "loki.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "k8s-triliovault-operator.labels" . | nindent 6 }}
serviceName: {{ template "loki.fullname" . }}-headless
updateStrategy:
{{- toYaml .Values.updateStrategy | nindent 4 }}
template:
metadata:
labels:
app: {{ template "loki.name" . }}
name: {{ template "loki.fullname" . }}
release: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "k8s-triliovault-operator.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{- if not .Values.config.existingSecret }}
checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "loki.serviceAccountName" . }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
securityContext:
{{- toYaml .Values.securityContext | nindent 8 }}
initContainers:
{{- toYaml .Values.initContainers | nindent 8 }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "-config.file=/etc/loki/loki.yaml"
{{- range $key, $value := .Values.extraArgs }}
- "-{{ $key }}={{ $value }}"
{{- end }}
volumeMounts:
- name: tmp
mountPath: /tmp
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | nindent 12}}
{{- end }}
- name: config
mountPath: /etc/loki
- name: storage
mountPath: "/data"
subPath: {{ .Values.persistence.subPath }}
{{- if or (.Values.useExistingAlertingGroup.enabled) (gt (len .Values.alerting_groups) 0) }}
- name: rules
mountPath: /rules/fake
{{- end }}
ports:
- name: http-metrics
containerPort: {{ .Values.config.server.http_listen_port }}
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
readOnlyRootFilesystem: true
env:
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 12 }}
{{- end }}
{{- if .Values.tracing.jaegerAgentHost }}
- name: JAEGER_AGENT_HOST
value: "{{ .Values.tracing.jaegerAgentHost }}"
{{- end }}
{{- if .Values.extraContainers }}
{{ toYaml .Values.extraContainers | indent 8}}
{{- end }}
nodeSelector:
{{- toYaml .Values.nodeSelector | nindent 8 }}
affinity:
{{- toYaml .Values.affinity | nindent 8 }}
tolerations:
{{- toYaml .Values.tolerations | nindent 8 }}
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
volumes:
- name: tmp
emptyDir: {}
{{- if or (.Values.useExistingAlertingGroup.enabled) (gt (len .Values.alerting_groups) 0) }}
- name: rules
configMap:
{{- if .Values.useExistingAlertingGroup.enabled }}
name: {{ .Values.useExistingAlertingGroup.configmapName }}
{{- else }}
name: {{ template "loki.fullname" . }}-alerting-rules
{{- end }}
{{- end }}
- name: config
secret:
{{- if .Values.config.existingSecret }}
secretName: {{ .Values.config.existingSecret }}
{{- else }}
secretName: {{ template "loki.fullname" . }}
{{- end }}
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8}}
{{- end }}
{{- if not .Values.persistence.enabled }}
- name: storage
emptyDir: {}
{{- else if .Values.persistence.existingClaim }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: storage
annotations:
{{- toYaml .Values.persistence.annotations | nindent 8 }}
spec:
accessModes:
{{- toYaml .Values.persistence.accessModes | nindent 8 }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
storageClassName: {{ .Values.persistence.storageClassName }}
{{- if .Values.persistence.selector }}
selector:
{{- toYaml .Values.persistence.selector | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,325 @@
image:
repository: grafana/loki
tag: 2.5.0
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
ingress:
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - loki
# topologyKey: "kubernetes.io/hostname"
## StatefulSet annotations
annotations: {}
# enable tracing for debugging; requires installing Jaeger and specifying the right jaeger_agent_host
tracing:
jaegerAgentHost:
config:
# existingSecret:
auth_enabled: false
ingester:
chunk_idle_period: 3m
chunk_block_size: 262144
chunk_retain_period: 1m
max_transfer_retries: 0
wal:
dir: /data/loki/wal
lifecycler:
ring:
kvstore:
store: inmemory
replication_factor: 1
## Different ring configs can be used. E.g. Consul
# ring:
# store: consul
# replication_factor: 1
# consul:
# host: "consul:8500"
# prefix: ""
# http_client_timeout: "20s"
# consistent_reads: true
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
max_entries_limit_per_query: 5000
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
server:
http_listen_port: 3100
storage_config:
boltdb_shipper:
active_index_directory: /data/loki/boltdb-shipper-active
cache_location: /data/loki/boltdb-shipper-cache
cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
shared_store: filesystem
filesystem:
directory: /data/loki/chunks
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
compactor:
working_directory: /data/loki/boltdb-shipper-compactor
shared_store: filesystem
# Needed for Alerting: https://grafana.com/docs/loki/latest/rules/
# This is just a simple example, for more details: https://grafana.com/docs/loki/latest/configuration/#ruler_config
# ruler:
# storage:
# type: local
# local:
# directory: /rules
# rule_path: /tmp/scratch
# alertmanager_url: http://alertmanager.svc.namespace:9093
# ring:
# kvstore:
# store: inmemory
# enable_api: true
## Additional Loki container arguments, e.g. log level (debug, info, warn, error)
extraArgs: {}
# log.level: debug
livenessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
networkPolicy:
enabled: false
## The app name of loki clients
client: {}
# name:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
## If you set enabled to "true", you need to:
## - create a PV larger than 10Gi in the same namespace as loki
## - keep storageClassName the same as the setting below
persistence:
enabled: false
accessModes:
- ReadWriteOnce
size: 10Gi
annotations: {}
# selector:
# matchLabels:
# app.kubernetes.io/name: loki
# subPath: ""
# existingClaim:
## Pod Labels
podLabels: {}
## Pod Annotations
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "http-metrics"
podManagementPolicy: OrderedReady
## Assign a PriorityClassName to pods if set
# priorityClassName:
rbac:
create: true
pspEnabled: false
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
replicas: 1
resources:
limits:
cpu: 1000m
memory: 500Mi
requests:
cpu: 500m
memory: 256Mi
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
service:
type: ClusterIP
nodePort:
port: 3100
annotations: {}
labels: {}
targetPort: http-metrics
serviceAccount:
create: true
name:
annotations: {}
automountServiceAccountToken: true
terminationGracePeriodSeconds: 4800
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# The values to set in the PodDisruptionBudget spec
# If not set then a PodDisruptionBudget will not be created
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
updateStrategy:
type: RollingUpdate
serviceMonitor:
enabled: false
interval: ""
additionalLabels: {}
annotations: {}
# scrapeTimeout: 10s
# path: /metrics
prometheusRule:
enabled: false
additionalLabels: {}
# namespace:
rules: []
# Some examples from https://awesome-prometheus-alerts.grep.to/rules.html#loki
# - alert: LokiProcessTooManyRestarts
# expr: changes(process_start_time_seconds{job=~"loki"}[15m]) > 2
# for: 0m
# labels:
# severity: warning
# annotations:
# summary: Loki process too many restarts (instance {{ $labels.instance }})
# description: "A loki process had too many restarts (target {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
# - alert: LokiRequestErrors
# expr: 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[1m])) by (namespace, job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (namespace, job, route) > 10
# for: 15m
# labels:
# severity: critical
# annotations:
# summary: Loki request errors (instance {{ $labels.instance }})
# description: "The {{ $labels.job }} and {{ $labels.route }} are experiencing errors\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
# - alert: LokiRequestPanic
# expr: sum(increase(loki_panic_total[10m])) by (namespace, job) > 0
# for: 5m
# labels:
# severity: critical
# annotations:
# summary: Loki request panic (instance {{ $labels.instance }})
# description: "The {{ $labels.job }} is experiencing {{ printf \"%.2f\" $value }}% increase of panics\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
# - alert: LokiRequestLatency
# expr: (histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{route!~"(?i).*tail.*"}[5m])) by (le))) > 1
# for: 5m
# labels:
# severity: critical
# annotations:
# summary: Loki request latency (instance {{ $labels.instance }})
# description: "The {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf \"%.2f\" $value }}s 99th percentile latency\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
initContainers: []
## Init containers to be added to the loki pod.
# - name: my-init-container
# image: busybox:latest
# command: ['sh', '-c', 'echo hello']
extraContainers: []
## Additional containers to be added to the loki pod.
# - name: reverse-proxy
# image: angelbarrera92/basic-auth-reverse-proxy:dev
# args:
# - "serve"
# - "--upstream=http://localhost:3100"
# - "--auth-config=/etc/reverse-proxy-conf/authn.yaml"
# ports:
# - name: http
# containerPort: 11811
# protocol: TCP
# volumeMounts:
# - name: reverse-proxy-auth-config
# mountPath: /etc/reverse-proxy-conf
extraVolumes: []
## Additional volumes to the loki pod.
# - name: reverse-proxy-auth-config
# secret:
# secretName: reverse-proxy-auth-config
## Extra volume mounts that will be added to the loki container
extraVolumeMounts: []
extraPorts: []
## Additional ports to the loki services. Useful to expose extra container ports.
# - port: 11811
# protocol: TCP
# name: http
# targetPort: http
# Extra env variables to pass to the loki container
env: []
# Specify Loki Alerting rules based on this documentation: https://grafana.com/docs/loki/latest/rules/
# When specified, you also need to add a ruler config section above. An example is shown in the alerting docs.
alerting_groups: []
# - name: example
# rules:
# - alert: HighThroughputLogStreams
# expr: sum by(container) (rate({job=~"loki-dev/.*"}[1m])) > 1000
# for: 2m
useExistingAlertingGroup:
enabled: false
configmapName: ""

View File

@ -0,0 +1,16 @@
apiVersion: v2
appVersion: 2.5.0
description: Promtail is an agent which ships the contents of local logs to a Loki
instance
home: https://grafana.com/loki
icon: https://raw.githubusercontent.com/grafana/loki/master/docs/sources/logo.png
maintainers:
- email: support@trilio.io
name: Trilio
name: promtail
sources:
- https://github.com/grafana/loki
- https://grafana.com/oss/loki/
- https://grafana.com/docs/loki/latest/
type: application
version: 4.2.0

View File

@ -0,0 +1,10 @@
***********************************************************************
Welcome to Grafana Promtail
Chart version: {{ .Chart.Version }}
Promtail version: {{ .Values.image.tag | default .Chart.AppVersion }}
***********************************************************************
Verify the application is working by running these commands:
* kubectl --namespace {{ .Release.Namespace }} port-forward daemonset/{{ include "promtail.fullname" . }} {{ .Values.config.serverPort }}
* curl http://127.0.0.1:{{ .Values.config.serverPort }}/metrics

View File

@ -0,0 +1,81 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "promtail.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "promtail.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "promtail.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "promtail.labels" -}}
helm.sh/chart: {{ include "promtail.chart" . }}
{{ include "promtail.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "promtail.selectorLabels" -}}
app.kubernetes.io/name: {{ include "promtail.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ include "k8s-triliovault-operator.labels" .}}
{{- end }}
{{/*
Create the name of the service account
*/}}
{{- define "promtail.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "promtail.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
The service name to connect to Loki. Defaults to the same logic as "loki.fullname"
*/}}
{{- define "loki.serviceName" -}}
{{- if .Values.loki.serviceName -}}
{{- .Values.loki.serviceName -}}
{{- else if .Values.loki.fullnameOverride -}}
{{- .Values.loki.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default "loki" .Values.loki.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,21 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "promtail.fullname" . }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- watch
- list
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "promtail.fullname" . }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "promtail.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "promtail.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -0,0 +1,132 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "promtail.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
updateStrategy:
{{- toYaml .Values.updateStrategy | nindent 4 }}
template:
metadata:
labels:
{{- include "promtail.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "promtail.serviceAccountName" . }}
{{- with .Values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- if .Values.initContainer.enabled }}
initContainers:
- name: init
image: "{{ .Values.initContainer.image.registry }}/{{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }}"
imagePullPolicy: {{ .Values.initContainer.image.pullPolicy }}
command:
- sh
- -c
- sysctl -w fs.inotify.max_user_instances={{ .Values.initContainer.fsInotifyMaxUserInstances }}
securityContext:
privileged: true
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: promtail
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "-config.file=/etc/promtail/promtail.yaml"
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /etc/promtail
- name: run
mountPath: /run/promtail
{{- with .Values.defaultVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.extraEnvFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: http-metrics
containerPort: {{ .Values.config.serverPort }}
protocol: TCP
{{- range $key, $values := .Values.extraPorts }}
- name: {{ .name | default $key }}
containerPort: {{ $values.containerPort }}
protocol: {{ $values.protocol | default "TCP" }}
{{- end }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config
secret:
secretName: {{ include "promtail.fullname" . }}
- name: run
hostPath:
path: /run/promtail
{{- with .Values.defaultVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,4 @@
{{ range .Values.extraObjects }}
---
{{ tpl (toYaml .) $ }}
{{ end }}

View File

@ -0,0 +1,126 @@
{{- if .Values.networkPolicy.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "promtail.name" . }}-namespace-only
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector: {}
ingress:
- from:
- podSelector: {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "promtail.name" . }}-egress-dns
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
- ports:
- port: 53
protocol: UDP
to:
- namespaceSelector: {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "promtail.name" . }}-egress-k8s-api
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
- ports:
- port: {{ .Values.networkPolicy.k8sApi.port }}
protocol: TCP
{{- if len .Values.networkPolicy.k8sApi.cidrs }}
to:
{{- range $cidr := .Values.networkPolicy.k8sApi.cidrs }}
- ipBlock:
cidr: {{ $cidr }}
{{- end }}
{{- end }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "promtail.name" . }}-ingress-metrics
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
ingress:
- ports:
- port: http-metrics
protocol: TCP
{{- if len .Values.networkPolicy.metrics.cidrs }}
from:
{{- range $cidr := .Values.networkPolicy.metrics.cidrs }}
- ipBlock:
cidr: {{ $cidr }}
{{- end }}
{{- if .Values.networkPolicy.metrics.namespaceSelector }}
- namespaceSelector:
{{- toYaml .Values.networkPolicy.metrics.namespaceSelector | nindent 12 }}
{{- if .Values.networkPolicy.metrics.podSelector }}
podSelector:
{{- toYaml .Values.networkPolicy.metrics.podSelector | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.extraPorts }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "promtail.name" . }}-egress-extra-ports
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
- ports:
{{- range $extraPortConfig := .Values.extraPorts }}
- port: {{ $extraPortConfig.containerPort }}
protocol: {{ $extraPortConfig.protocol }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,10 @@
{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "promtail.fullname" . }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
{{- toYaml .Values.podSecurityPolicy | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "promtail.fullname" . }}-psp
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- {{ include "promtail.fullname" . }}
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "promtail.fullname" . }}-psp
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "promtail.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ include "promtail.serviceAccountName" . }}
{{- end }}

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "promtail.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
stringData:
promtail.yaml: |
{{- tpl .Values.config.file . | nindent 4 }}

View File

@ -0,0 +1,52 @@
{{- range $key, $values := .Values.extraPorts }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "promtail.fullname" $ }}-{{ $key | lower }}
namespace: {{ $.Release.Namespace }}
labels:
{{- include "promtail.labels" $ | nindent 4 }}
{{- with .labels }}
    {{- toYaml . | nindent 4 }}
{{- end }}
{{- with .annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with $values.service }}
type: {{ .type | default "ClusterIP" }}
{{- with .clusterIP }}
clusterIP: {{ . }}
{{- end }}
{{- with .loadBalancerIP }}
loadBalancerIP: {{ . }}
{{- end }}
{{- with .loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end -}}
{{- with .externalIPs }}
externalIPs:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .externalTrafficPolicy }}
externalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
ports:
- name: {{ .name | default $key }}
targetPort: {{ .name | default $key }}
protocol: TCP
{{- if $values.service }}
port: {{ $values.service.port | default $values.containerPort }}
{{- if $values.service.nodePort }}
nodePort: {{ $values.service.nodePort }}
{{- end }}
{{- else }}
port: {{ $values.containerPort }}
{{- end }}
selector:
{{- include "promtail.selectorLabels" $ | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "promtail.fullname" . }}-metrics
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
spec:
clusterIP: None
ports:
- name: http-metrics
port: {{ .Values.config.serverPort }}
targetPort: http-metrics
protocol: TCP
selector:
{{- include "promtail.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "promtail.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "promtail.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,40 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "promtail.fullname" $ }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- else }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- with .Values.serviceMonitor.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "promtail.labels" $ | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.serviceMonitor.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "promtail.selectorLabels" . | nindent 6 }}
endpoints:
- port: http-metrics
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,430 @@
# -- Overrides the chart's name
nameOverride: null
# -- Overrides the chart's computed fullname
fullnameOverride: null
initContainer:
# -- Specifies whether the init container for setting inotify max user instances is to be enabled
enabled: false
image:
# -- The Docker registry for the init container
registry: docker.io
# -- Docker image repository for the init container
repository: busybox
# -- Docker tag for the init container
tag: 1.33
# -- Docker image pull policy for the init container image
pullPolicy: IfNotPresent
# -- The inotify max user instances to configure
fsInotifyMaxUserInstances: 128
image:
# -- The Docker registry
registry: docker.io
# -- Docker image repository
repository: grafana/promtail
# -- Overrides the image tag whose default is the chart's appVersion
tag: null
# -- Docker image pull policy
pullPolicy: IfNotPresent
# -- Image pull secrets for Docker images
imagePullSecrets: []
# -- Annotations for the DaemonSet
annotations:
ignore-check.kube-linter.io/run-as-non-root: "This deployment needs to run as root user to modify log files"
ignore-check.kube-linter.io/writable-host-mount: "This deployment needs writable volume mount on host to capture logs"
# -- The update strategy for the DaemonSet
updateStrategy:
type: RollingUpdate
# -- Pod labels
podLabels: {}
# -- Pod annotations
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "http-metrics"
# -- The name of the PriorityClass
priorityClassName: null
# -- Liveness probe
livenessProbe: {}
# -- Readiness probe
# @default -- See `values.yaml`
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# -- Resource requests and limits
resources:
limits:
cpu: 1000m
memory: 500Mi
requests:
cpu: 500m
memory: 256Mi
# -- The security context for pods
podSecurityContext:
runAsUser: 0
runAsGroup: 0
# -- The security context for containers
containerSecurityContext:
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
allowPrivilegeEscalation: false
rbac:
# -- Specifies whether RBAC resources are to be created
create: true
# -- Specifies whether a PodSecurityPolicy is to be created
pspEnabled: false
serviceAccount:
# -- Specifies whether a ServiceAccount should be created
create: true
# -- The name of the ServiceAccount to use.
# If not set and `create` is true, a name is generated using the fullname template
name: null
# -- Image pull secrets for the service account
imagePullSecrets: []
# -- Annotations for the service account
annotations: {}
# -- Node selector for pods
nodeSelector: {}
# -- Affinity configuration for pods
affinity: {}
# -- Tolerations for pods. By default, pods will be scheduled on master/control-plane nodes.
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
# -- Default volumes that are mounted into pods. In most cases, these should not be changed.
# Use `extraVolumes`/`extraVolumeMounts` for additional custom volumes.
# @default -- See `values.yaml`
defaultVolumes:
- name: containers
hostPath:
path: /var/lib/docker/containers
- name: pods
hostPath:
path: /var/log/pods
# -- Default volume mounts. Corresponds to `volumes`.
# @default -- See `values.yaml`
defaultVolumeMounts:
- name: containers
mountPath: /var/lib/docker/containers
readOnly: true
- name: pods
mountPath: /var/log/pods
readOnly: true
# Extra volumes to be added in addition to those specified under `defaultVolumes`.
extraVolumes: []
# Extra volume mounts, used together with `defaultVolumeMounts`. Corresponds to `extraVolumes`.
extraVolumeMounts: []
# Extra args for the Promtail container.
extraArgs: []
# -- Example:
# -- extraArgs:
# -- - -client.external-labels=hostname=$(HOSTNAME)
# -- Extra environment variables
extraEnv: []
# -- Extra environment variables from secrets or configmaps
extraEnvFrom: []
# ServiceMonitor configuration
serviceMonitor:
# -- If enabled, ServiceMonitor resources for Prometheus Operator are created
enabled: false
# -- Alternative namespace for ServiceMonitor resources
namespace: null
# -- Namespace selector for ServiceMonitor resources
namespaceSelector: {}
# -- ServiceMonitor annotations
annotations: {}
# -- Additional ServiceMonitor labels
labels: {}
# -- ServiceMonitor scrape interval
interval: null
# -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
scrapeTimeout: null
# -- ServiceMonitor relabel configs to apply to samples before scraping
# https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
relabelings: []
# -- Configure additional ports and services. For each configured port, a corresponding service is created.
# See values.yaml for details
extraPorts: {}
# syslog:
# name: tcp-syslog
# containerPort: 1514
# protocol: TCP
# service:
# type: ClusterIP
# clusterIP: null
# port: 1514
# externalIPs: []
# nodePort: null
# annotations: {}
# labels: {}
# loadBalancerIP: null
# loadBalancerSourceRanges: []
# externalTrafficPolicy: null
# -- PodSecurityPolicy configuration.
# @default -- See `values.yaml`
podSecurityPolicy:
privileged: true
allowPrivilegeEscalation: true
volumes:
- 'secret'
- 'hostPath'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
# -- Section for crafting Promtail's config file. The only directly relevant value is `config.file`
# which is a templated string that references the other values and snippets below this key.
# @default -- See `values.yaml`
config:
# -- The log level of the Promtail server
# Must be referenced in `config.file` to configure `server.log_level`
# See default config in `values.yaml`
logLevel: info
# -- The port of the Promtail server
# Must be referenced in `config.file` to configure `server.http_listen_port`
# See default config in `values.yaml`
serverPort: 3101
# -- The Loki address to post logs to.
# Must be referenced in `config.file` to configure `client.url`.
# See default config in `values.yaml`
lokiAddress: http://{{ .Release.Name }}-loki:3100/loki/api/v1/push
# -- A section of reusable snippets that can be referenced in `config.file`.
# Custom snippets may be added in order to reduce redundancy.
# This is especially helpful when multiple `kubernetes_sd_configs` are used, which usually have large parts in common.
# @default -- See `values.yaml`
snippets:
pipelineStages:
- cri: {}
- match:
selector: '{app="k8s-triliovault"}'
stages:
- json:
expressions:
file: file
func: func
level: level
msg: msg
tvk_version: tvk_version
tvk_instance_id: tvk_instance_id
service_id: service_id
service_type: service_type
transaction_id: transaction_id
transaction_type: transaction_type
transaction_resource_name: transaction_resource_name
transaction_resource_namespace: transaction_resource_namespace
child_transaction_type: child_transaction_type
child_transaction_id: child_transaction_id
child_transaction_resource_name: child_transaction_resource_name
child_transaction_resource_namespace: child_transaction_resource_namespace
- labels:
file:
func:
level:
msg:
tvk_version:
tvk_instance_id:
service_id:
service_type:
transaction_id:
transaction_type:
transaction_resource_name:
transaction_resource_namespace:
child_transaction_type:
child_transaction_id:
child_transaction_resource_name:
child_transaction_resource_namespace:
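      # Illustrative only: a JSON log line such as
      #   {"level":"info","msg":"Backup completed","func":"Reconcile","file":"controller.go","tvk_version":"2.7.0"}
      # would be parsed by the `json` stage above, and the extracted fields promoted
      # to labels by the `labels` stage (field values here are made up).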
common:
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: node_name
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
replacement: $1
separator: /
source_labels:
- namespace
- app
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- action: replace
replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: replace
replacement: /var/log/pods/*$1/*.log
regex: true/(.*)
separator: /
source_labels:
- __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
- __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
- __meta_kubernetes_pod_container_name
target_label: __path__
    # -- If set to true, adds an additional label for the scrape job.
# This helps debug the Promtail config.
addScrapeJobLabel: false
# -- You can put here any keys that will be directly added to the config file's 'client' block.
# @default -- empty
extraClientConfigs: []
# -- You can put here any additional scrape configs you want to add to the config file.
# @default -- empty
extraScrapeConfigs: ""
    # -- You can put here any additional relabel_configs to add to the "kubernetes-pods" job
extraRelabelConfigs: []
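    # Sketch (the annotation name is an assumption, not a promtail convention):
    # skip log collection for pods annotated with promtail.io/skip=true.
    # extraRelabelConfigs:
    #   - action: drop
    #     source_labels:
    #       - __meta_kubernetes_pod_annotation_promtail_io_skip
    #     regex: "true"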
scrapeConfigs: |
# See also https://github.com/grafana/loki/blob/master/production/ksonnet/promtail/scrape_config.libsonnet for reference
- job_name: kubernetes-pods
pipeline_stages:
{{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_controller_name
regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})?
action: replace
target_label: __tmp_controller_name
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_name
- __meta_kubernetes_pod_label_app
- __tmp_controller_name
- __meta_kubernetes_pod_name
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: app
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_component
- __meta_kubernetes_pod_label_component
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: component
{{- if .Values.config.snippets.addScrapeJobLabel }}
- replacement: kubernetes-pods
target_label: scrape_job
{{- end }}
{{- toYaml .Values.config.snippets.common | nindent 4 }}
{{- with .Values.config.snippets.extraRelabelConfigs }}
{{- toYaml . | nindent 4 }}
{{- end }}
# -- Config file contents for Promtail.
  # Must be configured as a string.
# It is templated so it can be assembled from reusable snippets in order to avoid redundancy.
# @default -- See `values.yaml`
file: |
server:
log_level: {{ .Values.config.logLevel }}
http_listen_port: {{ .Values.config.serverPort }}
clients:
- url: {{ tpl .Values.config.lokiAddress . }}
{{- with .Values.config.snippets.extraClientConfigs }}
{{- toYaml . | nindent 2 }}
{{- end }}
positions:
filename: /run/promtail/positions.yaml
scrape_configs:
{{- tpl .Values.config.snippets.scrapeConfigs . | nindent 2 }}
{{- tpl .Values.config.snippets.extraScrapeConfigs . | nindent 2 }}
networkPolicy:
# -- Specifies whether Network Policies should be created
enabled: false
metrics:
# -- Specifies the Pods which are allowed to access the metrics port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
podSelector: {}
# -- Specifies the namespaces which are allowed to access the metrics port
namespaceSelector: {}
# -- Specifies specific network CIDRs which are allowed to access the metrics port.
    # If you use the namespaceSelector, you also have to specify your kubelet networks here.
# The metrics ports are also used for probes.
cidrs: []
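    # Illustrative values (assumptions, adjust to your monitoring setup): allow a
    # Prometheus running in the "monitoring" namespace to reach the metrics port.
    # podSelector:
    #   matchLabels:
    #     app.kubernetes.io/name: prometheus
    # namespaceSelector:
    #   matchLabels:
    #     kubernetes.io/metadata.name: monitoring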
k8sApi:
# -- Specify the k8s API endpoint port
port: 8443
# -- Specifies specific network CIDRs you want to limit access to
cidrs: []
# -- Extra K8s manifests to deploy
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
# kind: ExternalSecret
# metadata:
# name: promtail-secrets
# spec:
# backendType: gcpSecretsManager
# data:
# - key: promtail-oauth2-creds
# name: client_secret

View File

@@ -0,0 +1,50 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "logging.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "logging.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "logging.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
The service name to connect to Loki. Defaults to the same logic as "loki.fullname"
*/}}
{{- define "loki.serviceName" -}}
{{- if .Values.loki.serviceName -}}
{{- .Values.loki.serviceName -}}
{{- else if .Values.loki.fullnameOverride -}}
{{- .Values.loki.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default "loki" .Values.loki.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
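{{/*
Illustrative usage only (a sketch, not part of this file): templates in the chart
would typically reference these helpers as, e.g.,
  name: {{ include "logging.fullname" . }}
  labels:
    chart: {{ include "logging.chart" . }}
*/}}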

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "logging.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "logging.name" . }}
chart: {{ template "logging.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app.kubernetes.io/instance: {{ template "logging.name" . }}
{{- include "k8s-triliovault-operator.labels" . | nindent 4 }}
grafana_datasource: "1"
data:
logging-datasource.yaml: |-
apiVersion: 1
datasources:
{{- if .Values.loki.enabled }}
- name: Loki
type: loki
access: proxy
url: http://{{(include "loki.serviceName" .)}}:{{ .Values.loki.service.port }}
version: 1
{{- end }}

View File

@@ -0,0 +1,16 @@
apiVersion: v2
appVersion: 0.1.0
dependencies:
- condition: prometheus.enabled
name: prometheus
repository: https://prometheus-community.github.io/helm-charts
version: ^15.8.7
description: Monitoring Stack designed to manage the K8s-TrilioVault Application's
Monitoring.
icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png
kubeVersion: '>=1.19.0-0'
maintainers:
- email: support@trilio.io
name: Trilio
name: monitoring
version: 0.1.0

View File

@@ -0,0 +1,22 @@
apiVersion: v2
appVersion: 2.34.0
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 4.7.*
description: Prometheus is a monitoring system and time series database.
home: https://prometheus.io/
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
maintainers:
- email: support@trilio.io
name: Trilio
name: prometheus
sources:
- https://github.com/prometheus/alertmanager
- https://github.com/prometheus/prometheus
- https://github.com/prometheus/pushgateway
- https://github.com/prometheus/node_exporter
- https://github.com/kubernetes/kube-state-metrics
type: application
version: 15.8.7

View File

@@ -0,0 +1,17 @@
apiVersion: v2
appVersion: 2.4.1
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
- metric
- monitoring
- prometheus
- kubernetes
maintainers:
- email: support@trilio.io
name: Trilio
name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
version: 4.7.0

View File

@@ -0,0 +1,10 @@
kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
The exposed metrics can be found here:
https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
The metrics are exported on the HTTP endpoint /metrics on the listening port.
In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
They are served either as plaintext or protobuf depending on the Accept header.
They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.

View File

@@ -0,0 +1,82 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kube-state-metrics.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kube-state-metrics.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "kube-state-metrics.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "kube-state-metrics.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kube-state-metrics.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Generate basic labels
*/}}
{{- define "kube-state-metrics.labels" }}
helm.sh/chart: {{ template "kube-state-metrics.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }}
{{- include "kube-state-metrics.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels }}
{{- end }}
{{- if .Values.releaseLabel }}
release: {{ .Release.Name }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kube-state-metrics.selectorLabels" }}
app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
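{{/*
Sketch of how these helpers are typically consumed elsewhere in the chart
(illustrative only, not an actual template in this diff):
  metadata:
    name: {{ template "kube-state-metrics.fullname" . }}
    namespace: {{ template "kube-state-metrics.namespace" . }}
    labels: {{- include "kube-state-metrics.labels" . | indent 4 }}
  spec:
    serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }}
*/}}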

Some files were not shown because too many files have changed in this diff.