# Source: rancher-partner-charts/charts/dkube/dkube-deployer/templates/config-map.yaml

apiVersion: v1
kind: ConfigMap
metadata:
name: dkube-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
data:
dkube.ini: |
#################################################################
#                                                               #
#                       DKUBE CONFIG FILE                       #
#                                                               #
#################################################################
[REQUIRED]
# Choose one of dkube/gke/okd/eks/ntnx
KUBE_PROVIDER={{ .Values.provider }}
# When HA=true, the k8s cluster must have a minimum of 3 schedulable nodes
HA={{ .Values.ha }}
# Operator's local sign-in details
# Username cannot be the same as a namespace's name.
# The following names are also restricted: dkube, monitoring, kubeflow
# '$' is not supported
USERNAME={{ .Values.username }}
PASSWORD={{ .Values.password }}
# To wipe dkube storage
# Accepted values: yes/no
WIPEDATA={{ .Values.wipedata }}
# To install minimal version of dkube
# Accepted values: yes/no
MINIMAL={{ .Values.minimal }}
# To install air-gapped version of dkube
# Accepted values: yes/no
AIRGAP={{ .Values.airgap }}
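# Illustrative non-HA example of the section above, with hypothetical values;
# the actual values are filled in from the chart's values.yaml:
#   KUBE_PROVIDER=dkube
#   HA=false
#   USERNAME=operator
#   PASSWORD=<operator-password>
#   WIPEDATA=no
#   MINIMAL=no
#   AIRGAP=no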
[NODE-AFFINITY]
# Label identifying the nodes on which the dkube pods must be scheduled, e.g. management nodes.
# Leave empty for no binding. When filled, a minimum of 3 nodes is required for HA and one node for non-HA.
# Example: DKUBE_NODES_LABEL: key1=value1
DKUBE_NODES_LABEL: {{ .Values.optional.nodeAffinity.dkubeNodesLabel }}
# Taints to be tolerated by the dkube control plane pods so that only they can be scheduled on those nodes
# Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
DKUBE_NODES_TAINTS: {{ .Values.optional.nodeAffinity.dkubeNodesTaints }}
# Taints of the nodes where gpu workloads must be scheduled.
# Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
GPU_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.gpuWorkloadTaints }}
# Taints of the nodes where production workloads must be scheduled.
# Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
PRODUCTION_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.productionWorkloadTaints }}
[OPTIONAL]
# version of dkube installer to be used
DKUBE_INSTALLER_VERSION={{ .Values.version }}
# version of dkube to be installed
DKUBE_VERSION={{ .Values.version }}
# Dockerhub Secrets for OCDR images
# If you don't create one, it will be auto-created with default values.
DKUBE_DOCKERHUB_CREDENTIALS_SECRET={{ .Values.optional.dkubeDockerhubCredentialsSecret }}
# TLS Secret of Operator's Certificate & Private Key
# If you don't create one, place your certificate and private key in $HOME/.dkube
DKUBE_OPERATOR_CERTIFICATE=
# Repository from which Dkube images can be pulled.
# Format: registry/[repo]
DKUBE_REGISTRY={{ .Values.registry.name }}
# Container registry username
REGISTRY_UNAME={{ .Values.registry.username }}
# Container registry password
REGISTRY_PASSWD={{ .Values.registry.password }}
# AWS IAM role
# Valid only if KUBE_PROVIDER=eks
# This will be set as an annotation on a few deployments
# Format should be like:
# IAM_ROLE=<key>: <iam role>
# eg: IAM_ROLE=iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole
# Note: Do not enclose the value in quotes
IAM_ROLE={{ .Values.optional.IAMRole }}
[EXTERNAL]
# Type of the dkube proxy service; possible values are nodeport and loadbalancer
ACCESS={{ .Values.optional.loadbalancer.access }}
# Set to 'true' to install the MetalLB Loadbalancer
# LB_VIP_POOL must be filled if true
INSTALL_LOADBALANCER={{ .Values.optional.loadbalancer.metallb }}
# Only CIDR notation is allowed. E.g: 192.168.2.0/24
# Valid only if INSTALL_LOADBALANCER=true
LB_VIP_POOL={{ .Values.optional.loadbalancer.vipPool }}
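# Illustrative MetalLB setup for this section (hypothetical VIP pool):
#   ACCESS=loadbalancer
#   INSTALL_LOADBALANCER=true
#   LB_VIP_POOL=192.168.2.0/24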
[STORAGE]
# Type of storage
# Possible values: disk, pv, sc, nfs, ceph
# The following fields are required for each storage type
# -------------------------------------------------------
#  STORAGE_TYPE        REQUIRED_FIELDS
# -------------------------------------------------------
#  disk                STORAGE_DISK_NODE and STORAGE_DISK_PATH
#  pv                  STORAGE_PV
#  sc                  STORAGE_SC
#  nfs                 STORAGE_NFS_SERVER and STORAGE_NFS_PATH
#  ceph                STORAGE_CEPH_MONITORS and STORAGE_CEPH_SECRET
#  For 2.2.1.12 and later:
#  ceph                STORAGE_CEPH_FILESYSTEM and STORAGE_CEPH_NAMESPACE
STORAGE_TYPE={{ .Values.optional.storage.type }}
# Local path on the storage node
STORAGE_DISK_PATH={{ .Values.optional.storage.path }}
# Node name of the storage node
# Possible values: AUTO/<nodename>
# AUTO - the master node will be chosen for storage if KUBE_PROVIDER=dkube
STORAGE_DISK_NODE={{ .Values.optional.storage.node }}
# Name of persistent volume
STORAGE_PV={{ .Values.optional.storage.persistentVolume }}
# Name of the storage class
# Make sure a dynamic provisioner is running for the storage class
STORAGE_SC={{ .Values.optional.storage.storageClass }}
# NFS server IP
STORAGE_NFS_SERVER={{ .Values.optional.storage.nfsServer }}
# NFS path (Make sure the path exists)
STORAGE_NFS_PATH={{ .Values.optional.storage.nfsPath }}
# Comma-separated IPs of ceph monitors
STORAGE_CEPH_MONITORS={{ .Values.optional.storage.cephMonitors }}
# Ceph secret
STORAGE_CEPH_SECRET={{ .Values.optional.storage.cephSecret }}
# Name of the ceph filesystem
# E.g: dkubefs
STORAGE_CEPH_FILESYSTEM={{ .Values.optional.storage.cephFilesystem }}
# Name of the namespace where ceph is installed
# E.g: rook-ceph
STORAGE_CEPH_NAMESPACE={{ .Values.optional.storage.cephNamespace }}
# Internal Ceph
# Internal ceph is installed when HA=true and STORAGE_TYPE is not in ("nfs", "ceph")
# Both of the following fields are compulsory
# Configuration path for internal ceph
STORAGE_CEPH_PATH={{ .Values.optional.storage.cephPath }}
# Disk name for internal ceph storage
# It should be a raw formatted disk
# E.g: sdb
STORAGE_CEPH_DISK={{ .Values.optional.storage.cephDisk }}
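# Illustrative NFS-backed setup for this section (hypothetical server and export path):
#   STORAGE_TYPE=nfs
#   STORAGE_NFS_SERVER=192.168.1.10
#   STORAGE_NFS_PATH=/srv/dkube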
[MODELMONITOR]
#To enable modelmonitor in dkube. (true / false)
ENABLED={{ .Values.optional.modelmonitor.enabled }}
[CICD]
#To enable tekton cicd with dkube. (true / false)
ENABLED={{ .Values.optional.CICD.enabled }}
#Docker registry where CICD built images will be saved.
#For DockerHub, enter docker.io/<username>
DOCKER_REGISTRY={{ .Values.optional.CICD.registryName }}
REGISTRY_USERNAME={{ .Values.optional.CICD.registryUsername }}
REGISTRY_PASSWORD={{ .Values.optional.CICD.registryPassword }}
#For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com.
#DOCKER_REGISTRY=aws_account_id.dkr.ecr.region.amazonaws.com
#Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM
#based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess
IAM_ROLE={{ .Values.optional.CICD.IAMRole }}
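# Illustrative CICD registry setup (hypothetical DockerHub account):
#   ENABLED=true
#   DOCKER_REGISTRY=docker.io/<username>
#   REGISTRY_USERNAME=<username>
#   REGISTRY_PASSWORD=<password>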
[MODEL-CATALOG]
#To enable model catalog with dkube. (true / false)
ENABLED={{ template "dkube-deployer.modelCatalog" . }}
[DBAAS]
#To configure an external database for dkube
#Supported: mysql, sqlserver (mssql)
#If left empty, the default SQL DB installed with dkube will be used.
DATABASE={{ .Values.optional.DBAAS.database }}
#The DSN syntax described at https://gorm.io/docs/connecting_to_the_database.html can be followed here
DSN={{ .Values.optional.DBAAS.dsn }}
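# Illustrative MySQL DSN following the GORM syntax linked above
# (hypothetical host, credentials and database name):
#   DATABASE=mysql
#   DSN=user:password@tcp(mysql.example.com:3306)/dkube?charset=utf8mb4&parseTime=True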