Update Dkube: Result of running make charts

pull/454/head
Tiji Thomas 2022-07-07 12:01:53 +05:30
parent b93e5f1380
commit a08b0e1711
16 changed files with 1336 additions and 0 deletions

Binary file not shown.

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,19 @@
# Helm chart manifest for the DKube deployer (Rancher partner chart).
annotations:
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: Dkube
  catalog.cattle.io/release-name: dkube
apiVersion: v2
# Quoted so version-like scalars are never re-typed by YAML tooling.
appVersion: "3.2.0.1"
description: A Kubernetes-based MLOps platform based on open standards Kubeflow and
  MLflow
home: https://dkube.io
icon: https://www.dkube.io/img/logo_new.png
keywords:
- kubernetes
- MLOps
- Kubeflow
- AI
# Quoted: an unquoted 1.20 would parse as the float 1.2.
kubeVersion: "1.20"
name: dkube-deployer
type: application
version: 1.0.602

View File

@ -0,0 +1,30 @@
# Dkube
[DKube](https://dkube.io/) is an MLOps product based on the best of Kubeflow and MLflow. It is optimized for implementation on-prem or in the cloud. You get the flexibility and innovation of open-source reference architectures like Kubeflow and MLflow, delivered as a supported product.
With DKube you can prepare your data including feature engineering, train AI models, optimize, tune and publish AI models and be able to deploy/serve those models. Kubeflow pipelines, KF Serving, MLFlow experiment tracking and comparison are all provided while allowing you to track the model and data versioning for reproducibility, audits and governance.
## Installation
### Requirements
The following is the minimum configuration required to deploy DKube on a Rancher cluster
- The minimal configuration for each of the worker nodes is as follows:
- 16 cores
- 64 GB RAM
- 300 GB storage for Root Volume
- The worker nodes could be brought up with any of the following OS distributions
- Ubuntu 20.04
- CentOS / RHEL 7.9
- Amazon Linux 2 for installations on AWS
- Storage
- The recommended storage option for DKube meta-data and user ML resources is an external NFS server with a min of 1TB storage available.
- For evaluation purposes, one of the worker nodes can be configured as the storage option. In this case the recommended storage size on the worker node is 1 TB, with a minimum of 400 GB.
- Dkube requires a Kubernetes version of 1.20.
- Dkube images registry details are required for installation. Please send a mail to support@dkube.io for the details.
- The following sections in the installation guide need to be followed to prepare a Rancher cluster for Dkube installation.
- [Getting the Dkube Files](https://dkube.io/install/install3_x/Install-Getting-Started.html#getting-the-dkube-files)
- [Setting up the Rancher Cluster](https://dkube.io/install/install3_x/Install-Rancher.html#setting-up-the-rancher-cluster)
- [Preparing the Rancher Cluster](https://dkube.io/install/install3_x/Install-Rancher.html#preparing-the-rancher-cluster).
- [Node Setup](https://dkube.io/install/install3_x/Install-Rancher.html#node-setup). This is optional for a non-GPU cluster.
For more information on installation, refer to the [Dkube Installation Guide](https://dkube.io/install/install3_x/Install-Advanced.html).

View File

@ -0,0 +1,326 @@
questions:
- variable: EULA
description: "The Dkube EULA is available at www.oneconvergence.com/EULA/One-Convergence-EULA.pdf . By accepting this license agreement you acknowledge that you have read and understood the terms and conditions mentioned. Please refer to Basic Configuration section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#basic-configuration"
type: enum
label: DKUBE-EULA
required: true
group: "General"
options:
- "yes"
# Operator's local sign-in username ("sigh-in" typo fixed).
- variable: username
  default: ""
  description: "Dkube operator's local sign-in username: Username cannot be same as that of a namespace's name. Also, following names are restricted - dkube, dkube-infra, kubeflow, istio-system, knative-serving, harbor-system. Please refer to Basic Configuration section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#basic-configuration"
  type: string
  label: Username
  required: true
  group: "General"
  show_if: "EULA=yes"
# Operator's local sign-in password ("sigh-in" typo fixed).
- variable: password
  default: ""
  description: "Dkube operator's local sign-in password"
  type: password
  label: Password
  required: true
  group: "General"
  show_if: "EULA=yes"
- variable: version
default: "3.2.0.1"
description: "Version of dkube to be installed"
type: string
label: Dkube version
required: true
group: "General"
show_if: "EULA=yes"
- variable: provider
default: "dkube"
description: "Kubernetes provider: Choose one of dkube/gke/okd/eks/ntnx/tanzu"
type: enum
label: Kube Provider
required: true
options:
- "dkube"
- "gke"
- "okd"
- "eks"
- "ntnx"
- "tanzu"
group: "General"
show_if: "EULA=yes"
- variable: ha
default: "false"
description: "When HA=true k8s cluster must have min 3 schedulable nodes. Please refer to resilient operation section of the installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#resilient-operation"
type: boolean
label: HA
required: true
group: "General"
show_if: "EULA=yes"
# default quoted: a bare `yes` is YAML 1.1 boolean true, which would fail the
# enum options below and the values.schema.json enum ["yes", "no"].
- variable: wipedata
  default: "yes"
  description: "Wipe dkube data during helm operation install/uninstall. Choose one of yes/no"
  type: enum
  label: Wipe Data
  required: true
  options:
  - "yes"
  - "no"
  group: "General"
  show_if: "EULA=yes"
# default quoted: a bare `no` is YAML 1.1 boolean false, not the string the
# enum options expect.
- variable: minimal
  default: "no"
  description: "To install minimal version of dkube. Choose one of yes/no"
  type: enum
  label: Minimal
  required: true
  options:
  - "yes"
  - "no"
  group: "General"
  show_if: "EULA=yes"
# default quoted: a bare `no` is YAML 1.1 boolean false, not the string the
# enum options expect.
- variable: airgap
  default: "no"
  description: "To install air-gapped version of dkube. Choose one of yes/no"
  type: enum
  label: Airgap
  required: true
  options:
  - "yes"
  - "no"
  group: "General"
  show_if: "EULA=yes"
# registry
- variable: registry.name
default: "docker.io/ocdr"
description: "Repository from where Dkube images can be picked. Format: registry/[repo]. Please contact support@dkube.io for Dkube registry details"
type: string
label: Dkube images registry
required: true
group: "Registry"
show_if: "EULA=yes"
- variable: registry.username
default: ""
description: "Container registry username"
type: string
label: Dkube images registry username
required: true
group: "Registry"
show_if: "EULA=yes"
- variable: registry.password
default: ""
description: "Container registry password"
type: password
label: Dkube images registry password
required: true
group: "Registry"
show_if: "EULA=yes"
# STORAGE
- variable: optional.storage.type
default: "disk"
description: "Type of storage. Note: ceph storage type can only be use with HA=true And pv or sc can only be used with HA=false. Please refer to Storage options section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#storage-options"
type: enum
label: Dkube storage type
options:
- "disk"
- "nfs"
- "ceph"
- "pv"
- "sc"
group: "Storage"
show_if: "EULA=yes"
subquestions:
- variable: optional.storage.path
default: "/var/dkube"
description: "Localpath on the storage node"
type: string
label: Dkube storage disk path
show_if: "optional.storage.type=disk"
- variable: optional.storage.node
default: ""
description: "Node name for dkube storage. Provide hostname of the master node if Kube provider is dkube"
type: string
label: Dkube storage disk node
show_if: "optional.storage.type=disk"
- variable: optional.storage.persistentVolume
default: ""
description: "Name of persistent volume to be used for storage"
type: string
label: Storage PV
show_if: "ha=false&&optional.storage.type=pv"
- variable: optional.storage.storageClass
default: ""
description: "Name of storage class to be used for storage. Make sure dynamic provisioner is running for the storage class name"
type: string
label: Storage class
show_if: "ha=false&&optional.storage.type=sc"
- variable: optional.storage.nfsServer
default: ""
description: "NFS server ip to be used for storage"
type: string
label: NFS Server
show_if: "optional.storage.type=nfs"
- variable: optional.storage.nfsPath
default: ""
description: "NFS path (Make sure the path exists)"
type: string
label: NFS path
show_if: "optional.storage.type=nfs"
- variable: optional.storage.cephMonitors
default: ""
description: "Comma separated IPs of ceph monitors"
type: string
label: Ceph monitors
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephSecret
default: ""
description: "Ceph secret"
type: string
label: Ceph Secret
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephFilesystem
default: ""
description: "Ceph Filesystem"
type: string
label: Ceph Filesystem
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephNamespace
default: ""
description: "Ceph Namespace"
type: string
label: Ceph Namespace
show_if: "optional.storage.type=ceph"
- variable: optional.storage.cephPath
default: "/var/lib/rook"
description: "Ceph data and configuration path for internal ceph. Internal ceph is installed when HA=true and Storage type is not equal to nfs or ceph"
type: string
label: Ceph storage path
#show_if: "ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
show_if: "ha=true"
- variable: optional.storage.cephDisk
default: ""
description: "Only for internal ceph from release 2.2.1.12. Disk name for internal ceph storage. It should be a raw formatted disk. E.g: sdb"
type: string
label: Ceph Storage Disk
#show_if: "ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
show_if: "ha=true"
# Loadbalancer
- variable: optional.loadbalancer.access
default: "nodeport"
description: "Type of dkube proxy service, possible values are nodeport and loadbalancer; Please use loadbalancer if kubeProvider is gke."
type: enum
label: Dkube access type
group: "Loadbalancer"
#show_if: "EULA=yes&&ha=true"
#show_if: "EULA=yes&&ha=true&&optional.storage.type!=ceph&&optional.storage.type!=nfs"
#show_if: "ha=true&&optional.storage.type=ceph"
options:
- "loadbalancer"
- "nodeport"
show_subquestion_if: loadbalancer
show_if: "EULA=yes"
subquestions:
# type changed string -> boolean: the default is a boolean and the description
# asks for true/false; matches the other boolean questions in this file
# (e.g. optional.modelmonitor.enabled).
- variable: optional.loadbalancer.metallb
  default: "false"
  description: "Set true to install MetalLB Loadbalancer. Please refer to Load Balancer options section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#load-balancer-options"
  type: boolean
  label: MetalLB Loadbalancer
- variable: optional.loadbalancer.vipPool
default: ""
description: "Valid only if installLoadbalancer is true; Only CIDR notation is allowed. E.g: 192.168.2.0/24"
type: string
label: Loadbalancer VipPool
show_if: "EULA=yes"
# Modelmonitor
- variable: optional.modelmonitor.enabled
default: "false"
description: "To enable modelmonitor in dkube. (true / false). Please refer to Model Monitor section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#model-monitor"
type: boolean
label: Enable Modelmonitor
group: "General"
show_if: "EULA=yes"
# DBAAS
- variable: optional.DBAAS.database
default: ""
description: "To configure external database for dkube. Supported mysql, sqlserver(mssql). Empty will pickup default sql db installed with dkube. Please refer to section External Database of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#external-database"
type: string
label: database
group: "DBAAS"
show_if: "EULA=yes"
- variable: optional.DBAAS.dsn
default: ""
description: "Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html"
type: string
label: dsn
group: "DBAAS"
show_if: "EULA=yes"
# CICD
- variable: optional.CICD.enabled
default: "false"
description: "To enable tekton cicd with dkube. (true / false). Please refer to CICD section of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#ci-cd"
type: boolean
label: CICD Enabled
group: "CICD"
show_if: "EULA=yes"
show_subquestion_if: true
subquestions:
# Defaults changed from boolean `false` to "" : these are string-typed
# questions, and a boolean default would render as the literal text "false"
# in the generated dkube.ini. Empty string matches every other optional
# string question in this file.
- variable: optional.CICD.registryName
  default: ""
  description: "Docker registry where CICD built images will be saved"
  type: string
  label: Docker registry name
- variable: optional.CICD.registryUsername
  default: ""
  description: "Docker registry Username"
  type: string
  label: Docker registry Username
- variable: optional.CICD.registryPassword
  default: ""
  description: "Docker registry password"
  type: string
  label: Docker registry Password
- variable: optional.CICD.IAMRole
  default: ""
  description: "For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com. registryName: 'aws_account_id.dkr.ecr.region.amazonaws.com' Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess; IAMRole: 'arn:aws:iam::<aws_account_id>:role/<iam-role>'"
  type: string
  label: IAMRole
# Node Affinity
- variable: optional.nodeAffinity.dkubeNodesLabel
default: ""
description: "Nodes identified by labels on which the dkube pods must be scheduled.. Say management nodes. Unfilled means no binding. When filled there needs to be minimum of 3nodes in case of HA and one node in case of non-HA. Example: DKUBE_NODES_LABEL: key1=value1. Please refer to section Node Affinity of installation guide. https://dkube.io/install/install3_x/Install-Advanced.html#node-affinity"
type: string
label: DKUBE_NODES_LABEL
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.dkubeNodesTaints
default: ""
description: "Nodes to be tolerated by dkube control plane pods so that only they can be scheduled on the nodes. Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: DKUBE_NODES_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.gpuWorkloadTaints
default: ""
description: "Taints of the nodes where gpu workloads must be scheduled. Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: GPU_WORKLOADS_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.nodeAffinity.productionWorkloadTaints
default: ""
description: "Taints of the nodes where production workloads must be scheduled. Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule"
type: string
label: PRODUCTION_WORKLOADS_TAINTS
group: "NodeAffinity"
show_if: "EULA=yes"
- variable: optional.dkubeDockerhubCredentialsSecret
default: ""
description: "Dockerhub Secrets for OCDR images. If you don't create, this will be auto-created with default values."
type: string
label: DKUBE DOCKERHUB CREDENTIALS SECRET
group: "General"
show_if: "EULA=yes"
- variable: optional.IAMRole
default: ""
description: "AWS IAM role. Valid only if KUBE_PROVIDER=eks. This will be set as an annotation in few deployments. Format should be like: IAMRole: '<key>: <iam role>' eg: IAMRole: 'iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole'"
type: string
label: IAMRole
group: "General"
show_if: "EULA=yes&&provider=eks"

View File

@ -0,0 +1,7 @@
Installing Dkube {{ .Values.version }}
DKube Installation has started. Please use the commands below to view the installation progress. The commands are for installation only. Do not use them for upgrade.
kubectl wait --for=condition=ready --timeout=5m pod -l job-name=dkube-helm-installer
kubectl logs -l job-name=dkube-helm-installer --follow --tail=-1 && kubectl wait --for=condition=complete --timeout=30m job/dkube-helm-installer

View File

@ -0,0 +1,53 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "dkube-deployer.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dkube-deployer.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "dkube-deployer.labels" -}}
helm.sh/chart: {{ include "dkube-deployer.chart" . }}
{{ include "dkube-deployer.selectorLabels" . }}
app.kubernetes.io/version: {{ .Values.version | quote }}
app.kubernetes.io/managed-by: "dkube.io"
{{- end }}
{{/*
Selector labels
*/}}
{{- define "dkube-deployer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dkube-deployer.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Image pull secret
*/}}
{{- define "dkube-deployer.imagePullSecretData" -}}
{{- with .Values.registry }}
{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"ocdlgit@oneconvergence.com\",\"auth\":\"%s\"}}}" .name .username .password (printf "%s:%s" .username .password | b64enc) | b64enc }}
{{- end }}
{{- end }}
{{/*
model catalog enable flag
*/}}
{{- define "dkube-deployer.modelCatalog" -}}
{{- /* Emits "false" only when .Values.version starts with "2.1"; "true" for every other version. Whitespace is trimmed so the template renders a bare boolean word. */ -}}
{{- if hasPrefix "2.1" .Values.version }}
{{- printf "false" }}
{{- else }}
{{- printf "true" }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,167 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dkube-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
data:
dkube.ini: |
#################################################################
# #
# DKUBE CONFIG FILE #
# #
#################################################################
[REQUIRED]
# Choose one of dkube/gke/okd/eks/ntnx
KUBE_PROVIDER={{ .Values.provider }}
# When HA=true k8s cluster must have min 3 schedulable nodes
HA={{ .Values.ha }}
# Operator's Local Sign In Details
# Username cannot be same as that of a namespace's name.
# Also, following names are restricted- dkube, monitoring, kubeflow
# '$' is not supported
USERNAME={{ .Values.username }}
PASSWORD={{ .Values.password }}
# To wipe dkube storage
# Accepted values: yes/no
WIPEDATA={{ .Values.wipedata }}
# To install minimal version of dkube
# Accepted values: yes/no
MINIMAL={{ .Values.minimal }}
# To install air-gapped version of dkube
# Accepted values: yes/no
AIRGAP={{ .Values.airgap }}
[NODE-AFFINITY]
# Nodes identified by labels on which the dkube pods must be scheduled.. Say management nodes. Unfilled means no binding. When filled there needs to be minimum of 3nodes in case of HA and one node in case of non-HA
# Example: DKUBE_NODES_LABEL: key1=value1
DKUBE_NODES_LABEL: {{ .Values.optional.nodeAffinity.dkubeNodesLabel }}
# Nodes to be tolerated by dkube control plane pods so that only they can be scheduled on the nodes
# Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
DKUBE_NODES_TAINTS: {{ .Values.optional.nodeAffinity.dkubeNodesTaints }}
# Taints of the nodes where gpu workloads must be scheduled.
# Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
GPU_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.gpuWorkloadTaints }}
# Taints of the nodes where production workloads must be scheduled.
# Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
PRODUCTION_WORKLOADS_TAINTS: {{ .Values.optional.nodeAffinity.productionWorkloadTaints }}
[OPTIONAL]
# version of dkube installer to be used
DKUBE_INSTALLER_VERSION={{ .Values.version }}
# version of dkube to be installed
DKUBE_VERSION={{ .Values.version }}
# Dockerhub Secrets for OCDR images
# If you don't create, this will be auto-created with default values.
DKUBE_DOCKERHUB_CREDENTIALS_SECRET={{ .Values.optional.dkubeDockerhubCredentialsSecret }}
# TLS Secret of Operator's Certificate & Private Key
# If you don't create, place your certificate and private key in $HOME/.dkube
DKUBE_OPERATOR_CERTIFICATE=
# Repository from where Dkube images can be picked.
# Format: registry/[repo]
DKUBE_REGISTRY={{ .Values.registry.name }}
# Container registry username
REGISTRY_UNAME={{ .Values.registry.username }}
# Container registry password
REGISTRY_PASSWD={{ .Values.registry.password }}
# AWS IAM role
# Valid only if KUBE_PROVIDER=eks
# This will be set as an annotation in few deployments
# Format should be like:
# IAM_ROLE=<key>: <iam role>
# eg: IAM_ROLE=iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole
# Note: Don't enclose with quotes
IAM_ROLE={{ .Values.optional.IAMRole }}
[EXTERNAL]
# Type of dkube proxy service, possible values are nodeport and loadbalancer
ACCESS={{ .Values.optional.loadbalancer.access }}
# 'true' - to install MetalLB Loadbalancer
# Must fill LB_VIP_POOL if true
INSTALL_LOADBALANCER={{ .Values.optional.loadbalancer.metallb }}
# Only CIDR notation is allowed. E.g: 192.168.2.0/24
# Valid only if INSTALL_LOADBALANCER=true
LB_VIP_POOL={{ .Values.optional.loadbalancer.vipPool }}
[STORAGE]
# Type of storage
# Possible values: disk, pv, sc, nfs
# Following are required fields for corresponding storage type
# -------------------------------------------------------
# STORAGE_TYPE REQUIRED_FIELDS
# -------------------------------------------------------
# disk STORAGE_DISK_NODE and STORAGE_DISK_PATH
# pv STORAGE_PV
# sc STORAGE_SC
# nfs STORAGE_NFS_SERVER and STORAGE_NFS_PATH
# ceph STORAGE_CEPH_MONITORS and STORAGE_CEPH_SECRET
# For 2.2.1.12 and later
# ceph STORAGE_CEPH_FILESYSTEM and STORAGE_CEPH_NAMESPACE
STORAGE_TYPE={{ .Values.optional.storage.type }}
# Localpath on the storage node
STORAGE_DISK_PATH={{ .Values.optional.storage.path }}
# Nodename of the storage node
# Possible values: AUTO/<nodename>
# AUTO - Master node will be chosen for storage if KUBE_PROVIDER=dkube
STORAGE_DISK_NODE={{ .Values.optional.storage.node }}
# Name of persistent volume
STORAGE_PV={{ .Values.optional.storage.persistentVolume }}
# Name of storage class name
# Make sure dynamic provisioner is running for the storage class name
STORAGE_SC={{ .Values.optional.storage.storageClass }}
# NFS server ip
STORAGE_NFS_SERVER={{ .Values.optional.storage.nfsServer }}
# NFS path (Make sure the path exists)
STORAGE_NFS_PATH={{ .Values.optional.storage.nfsPath }}
# Comma separated IPs of ceph monitors
STORAGE_CEPH_MONITORS={{ .Values.optional.storage.cephMonitors }}
# Ceph secret
STORAGE_CEPH_SECRET={{ .Values.optional.storage.cephSecret }}
# Name of the ceph filesystem
# E.g: dkubefs
STORAGE_CEPH_FILESYSTEM={{ .Values.optional.storage.cephFilesystem }}
# Name of the namespace where ceph is installed
# E.g: rook-ceph
STORAGE_CEPH_NAMESPACE={{ .Values.optional.storage.cephNamespace }}
# Internal Ceph
# Internal ceph is installed when HA=true and STORAGE_TYPE is not in ("nfs", "ceph")
# Both the following fields are compulsory
# Configuration path for internal ceph
STORAGE_CEPH_PATH={{ .Values.optional.storage.cephPath }}
# Disk name for internal ceph storage
# It should be a raw formatted disk
# E.g: sdb
STORAGE_CEPH_DISK={{ .Values.optional.storage.cephDisk }}
[MODELMONITOR]
#To enable modelmonitor in dkube. (true / false)
ENABLED={{ .Values.optional.modelmonitor.enabled }}
[CICD]
#To enable tekton cicd with dkube. (true / false)
ENABLED={{ .Values.optional.CICD.enabled }}
#Docker registry where CICD built images will be saved.
#For DockerHub, enter docker.io/<username>
DOCKER_REGISTRY={{ .Values.optional.CICD.registryName }}
REGISTRY_USERNAME={{ .Values.optional.CICD.registryUsername }}
REGISTRY_PASSWORD={{ .Values.optional.CICD.registryPassword }}
#For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com.
#DOCKER_REGISTRY=aws_account_id.dkr.ecr.region.amazonaws.com
#Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM
#based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess
IAM_ROLE={{ .Values.optional.CICD.IAMRole }}
[MODEL-CATALOG]
#To enable model catalog with dkube. (true / false)
ENABLED={{ template "dkube-deployer.modelCatalog" . }}
#To configure external database for dkube
[DBAAS]
#Supported mysql, sqlserver(mssql)
#Empty will pickup default sql db installed with dkube.
DATABASE={{ .Values.optional.DBAAS.database }}
#Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html
DSN={{ .Values.optional.DBAAS.dsn }}

View File

@ -0,0 +1,47 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-uninstaller-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-uninstaller-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
hostPID: true
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-uninstaller-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
-
mountPath: /root/.dkube/dkube.ini
name: dkube-config
subPath: dkube.ini
{{- if eq .Values.wipedata "yes" }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "uninstall", "--wipe-data"]
{{- else }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "uninstall"]
{{- end }}
serviceAccountName: dkube-deployer-sa
volumes:
-
configMap:
name: dkube-config
name: dkube-config

View File

@ -0,0 +1,67 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-upgrade-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "-1"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-upgrade-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-upgrade-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "upgrade", {{ .Values.version | quote}}]
serviceAccountName: dkube-deployer-sa
---
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-installer-job-cleanup-hook"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-upgrade,post-upgrade
"helm.sh/hook-weight": "-2"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-installer-job-cleanup-hook"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-installer-job-cleanup-hook
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
command: ["/bin/sh", "-c"]
args:
- kubectl delete job dkube-helm-installer --ignore-not-found=true
serviceAccountName: dkube-deployer-sa

View File

@ -0,0 +1,41 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "dkube-helm-installer"
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
spec:
backoffLimit: 0
template:
metadata:
name: "dkube-helm-installer"
labels:
{{- include "dkube-deployer.selectorLabels" . | nindent 8 }}
spec:
hostPID: true
restartPolicy: Never
imagePullSecrets:
- name: dkube-dockerhub-secret
containers:
- name: dkube-helm-installer
image: {{ .Values.registry.name }}/dkubeadm:{{ .Values.version }}
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
-
mountPath: /root/.dkube/dkube.ini
name: dkube-config
subPath: dkube.ini
{{- if eq .Values.wipedata "yes" }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "install", "--accept-eula=yes", "--wipe-data"]
{{- else }}
command: ["/opt/dkubeadm/dkubeadm.sh", "dkube", "install", "--accept-eula={{ .Values.EULA }}"]
{{- end }}
serviceAccountName: dkube-deployer-sa
volumes:
-
configMap:
name: dkube-config
name: dkube-config

View File

@ -0,0 +1,10 @@
# Image-pull secret for OCDR/DKube registry images; the dockerconfigjson
# payload is built by the dkube-deployer.imagePullSecretData helper from
# .Values.registry credentials.
apiVersion: v1
kind: Secret
metadata:
  name: dkube-dockerhub-secret
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "dkube-deployer.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: {{ template "dkube-deployer.imagePullSecretData" . }}

View File

@ -0,0 +1,136 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: dkube-deployer-binding
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: dkube-deployer-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dkube-deployer-sa
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: dkube-deployer-clusterrole
namespace: {{ .Release.Namespace }}
labels:
{{- include "dkube-deployer.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/portforward
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
- update
- patch
- create
- delete
- apiGroups:
- kubeflow.org
resources:
- tfjobs
verbs:
- '*'
- apiGroups:
- kubeflow.org
resources:
- mpijobs
verbs:
- '*'
- apiGroups:
- '*'
resources:
- replicasets
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- ""
resources:
- events
verbs:
- list
- apiGroups:
- ""
resources:
- persistentvolumes
- persistentvolumeclaims
- services
- endpoints
- configmaps
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- nodes
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
- clusterroles
- clusterrolebindings
verbs:
- '*'
- apiGroups:
- ""
resources:
- serviceaccounts
- secrets
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- '*'

View File

@ -0,0 +1,205 @@
{
"$schema":"http://json-schema.org/draft-07/schema",
"type":"object",
"title":"DKube deployer Helm values",
"description":"Validation schema for the values accepted by the dkube-deployer Helm chart.",
"required":[
"EULA",
"username",
"password",
"version",
"provider",
"ha",
"wipedata",
"registry",
"optional"
],
"properties":{
"provider":{
"$id":"#/properties/provider",
"enum": ["dkube", "gke", "okd", "eks", "ntnx", "tanzu"]
},
"username":{
"$id":"#/properties/username",
"type":"string",
"minLength":1
},
"password":{
"$id":"#/properties/password",
"type":"string",
"minLength":1
},
"EULA":{
"$id":"#/properties/EULA",
"type":"string",
"enum": ["yes"]
},
"ha":{
"$id":"#/properties/ha",
"type":"boolean"
},
"wipedata":{
"$id":"#/properties/wipedata",
"type":"string",
"enum": ["yes", "no"]
},
"registry":{
"$id":"#/properties/registry",
"type":"object",
"required": [
"name",
"username",
"password"
],
"properties":{
"name":{
"$id":"#/properties/registry/properties/name",
"type":"string",
"minLength":1
},
"username":{
"$id":"#/properties/registry/properties/username",
"type":"string",
"minLength":1
},
"password":{
"$id":"#/properties/registry/properties/password",
"type":"string",
"minLength":1
}
}
},
"optional":{
"$id":"#/properties/optional",
"type":"object",
"required": [
"storage"
],
"properties":{
"storage":{
"$id":"#/properties/optional/properties/storage",
"type":"object",
"properties": {
"type": {
"enum": ["disk", "pv", "sc", "nfs", "ceph"]
}
},
"allOf":[
{
"if": {
"properties": {"type": {"const": "disk"}}
},
"then": {
"$ref": "#/properties/optional/definitions/disk"
}
},
{
"if": {
"properties": {"type": {"const": "pv"}}
},
"then": {
"$ref": "#/properties/optional/definitions/pv"
}
},
{
"if": {
"properties": {"type": {"const": "sc"}}
},
"then": {
"$ref": "#/properties/optional/definitions/sc"
}
},
{
"if": {
"properties": {"type": {"const": "nfs"}}
},
"then": {
"$ref": "#/properties/optional/definitions/nfs"
}
},
{
"if": {
"properties": {"type": {"const": "ceph"}}
},
"then": {
"$ref": "#/properties/optional/definitions/ceph"
}
}
]
}
},
"definitions":{
"disk":{
"properties":{
"path":{
"type":"string",
"pattern":"^(/[^/ ]*)+/?$"
},
"node":{
"type":"string",
"minLength": 1
}
},
"required":[
"path",
"node"
]
},
"pv":{
"properties":{
"persistentVolume":{
"type":"string",
"minLength": 1
}
},
"required":[
"persistentVolume"
]
},
"sc":{
"properties":{
"storageClass":{
"type":"string",
"minLength": 1
}
},
"required":[
"storageClass"
]
},
"nfs":{
"properties":{
"nfsPath":{
"type":"string",
"pattern":"^(/[^/ ]*)+/?$"
},
"nfsServer":{
"type":"string",
"minLength": 1
}
},
"required":[
"nfsPath",
"nfsServer"
]
},
"ceph":{
"properties":{
"cephMonitors":{
"type":"string"
},
"cephSecret":{
"type":"string"
},
"cephFilesystem":{
"type":"string"
},
"cephNamespace":{
"type":"string"
}
}
}
}
}
}
}

View File

@ -0,0 +1,182 @@
# The DKube EULA is available at: www.oneconvergence.com/EULA/One-Convergence-EULA.pdf
# By accepting this license agreement you acknowledge that you agree to the terms and conditions.
# The installation will only proceed if the EULA is accepted by defining the EULA value as "yes".
EULA: ""
# Operator's Local Sign In Details.
# Username cannot be same as that of a kubernetes namespace's name.
# Names like dkube, monitoring, kubeflow are restricted.
username: ""
password: ""
# dkube version
version: "3.2.0.1"
# Choose one of dkube/gke/okd/eks/ntnx/tanzu kube provider
provider: "dkube"
# For ha deployment, k8s cluster must have min 3 schedulable nodes
ha: false
# Wipe dkube data during helm operation install/uninstall.
# Choose one of yes/no
wipedata: ""
# To install minimal version of dkube
# Accepted values: yes/no
minimal: "no"
# To install air-gapped version of dkube
# Accepted values: yes/no
airgap: "no"
# Docker registry for DKube installation
registry:
# Format: registry/[repo]
name: "docker.io/ocdr"
# Container registry username
username: ""
# Container registry password
password: ""
optional:
storage:
# Type of storage
# Possible values: disk, pv, sc, nfs, ceph
# Following are required fields for corresponding storage type
# -------------------------------------------------------
# STORAGE_TYPE REQUIRED_FIELDS
# -------------------------------------------------------
# disk node and path
# pv persistentVolume
# sc storageClass
# nfs nfsServer and nfsPath
# ceph cephMonitors and cephSecret
# For release 2.2.1.12 and later
# ceph cephFilesystem and cephNamespace
type: "disk"
# Localpath on the storage node
path: "/var/dkube"
# Nodename of the storage node
# Possible values: AUTO/<nodename>
# AUTO - Master node will be chosen for storage if KUBE_PROVIDER=dkube
node: ""
# Name of persistent volume
persistentVolume: ""
# Name of the storage class
# Make sure dynamic provisioner is running for the storage class name
storageClass: ""
# NFS server ip
nfsServer: ""
# NFS path (Make sure the path exists)
nfsPath: ""
# Only for external ceph before release 2.2.1.12
# Comma separated IPs of ceph monitors
cephMonitors: ""
# Only for external ceph before release 2.2.1.12
# Ceph secret
cephSecret: ""
# Only for external ceph from release 2.2.1.12
# Name of the ceph filesystem
# E.g: dkubefs
cephFilesystem: ""
# Only for external ceph from release 2.2.1.12
# Name of the namespace where ceph is installed
# E.g: rook-ceph
cephNamespace: ""
# Internal Ceph
# Internal ceph is installed when HA=true and STORAGE_TYPE is not in ("nfs", "ceph")
# Configuration path for internal ceph
cephPath: "/var/lib/rook"
# Only for internal ceph from release 2.2.1.12
# Disk name for internal ceph storage
# It should be a raw formatted disk
# E.g: sdb
cephDisk: ""
loadbalancer:
# Type of dkube proxy service, possible values are nodeport and loadbalancer
# Please use loadbalancer if kubeProvider is gke.
access: "nodeport"
# 'true' - to install MetalLB Loadbalancer
# Must fill vipPool below if true
metallb: "false"
# Only CIDR notation is allowed. E.g: 192.168.2.0/24
# Valid only if metallb above is "true"
vipPool: ""
modelmonitor:
#To enable modelmonitor in dkube. (true / false)
enabled: false
DBAAS:
# To configure external database for dkube
# Supported mysql, sqlserver(mssql)
# Empty will pickup default sql db installed with dkube
database: ""
# Syntaxes here can be followed to specify dsn https://gorm.io/docs/connecting_to_the_database.html
dsn: ""
CICD:
#To enable tekton cicd with dkube. (true / false)
enabled: false
#Docker registry where CICD built images will be saved.
registryName: "docker.io/ocdr"
registryUsername: ""
registryPassword: ""
#For AWS ECR on EKS K8S cluster, enter registry as aws_account_id.dkr.ecr.region.amazonaws.com.
#registryName: "aws_account_id.dkr.ecr.region.amazonaws.com"
#Worker nodes should either have AmazonEC2ContainerRegistryFullAccess or if you are using KIAM
#based IAM control, provide an IAM role which has AmazonEC2ContainerRegistryFullAccess
#IAMRole: "arn:aws:iam::<aws_account_id>:role/<iam-role>"
IAMRole: ""
nodeAffinity:
# Labels identifying the nodes on which the dkube control-plane pods must be scheduled (e.g. management nodes). Leave empty for no binding. When set, a minimum of 3 such nodes is required for HA and one node for non-HA.
# Example: DKUBE_NODES_LABEL: key1=value1
dkubeNodesLabel: ""
# Nodes to be tolerated by dkube control plane pods so that only they can be scheduled on the nodes
# Example: DKUBE_NODES_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
dkubeNodesTaints: ""
# Taints of the nodes where gpu workloads must be scheduled.
# Example: GPU_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
gpuWorkloadTaints: ""
# Taints of the nodes where production workloads must be scheduled.
# Example: PRODUCTION_WORKLOADS_TAINTS: key1=value1:NoSchedule,key2=value2:NoSchedule
productionWorkloadTaints: ""
# Dockerhub Secrets for OCDR images
# If you don't create, this will be auto-created with default values.
dkubeDockerhubCredentialsSecret: "dkube-dockerhub-secret"
# AWS IAM role
# Valid only if KUBE_PROVIDER=eks
# This will be set as an annotation in few deployments
# Format should be like:
# IAMRole: "<key>: <iam role>"
# eg: IAMRole: "iam.amazonaws.com/role: arn:aws:iam::123456789012:role/myrole"
IAMRole: ""

View File

@ -1015,6 +1015,29 @@ entries:
- assets/datadog/datadog-2.4.200.tgz
version: 2.4.200
dkube-deployer:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dkube
catalog.cattle.io/release-name: dkube
apiVersion: v2
appVersion: 3.2.0.1
created: "2022-07-07T11:59:10.38345804+05:30"
description: A Kubernetes-based MLOps platform based on open standards Kubeflow
and MLflow
digest: 97ebdc02ae42e565a2851bd2c789adba06be8950560184bdf4662e62a117f86d
home: https://dkube.io
icon: https://www.dkube.io/img/logo_new.png
keywords:
- kubernetes
- MLOps
- Kubeflow
- AI
kubeVersion: "1.20"
name: dkube-deployer
type: application
urls:
- assets/dkube/dkube-deployer-1.0.602.tgz
version: 1.0.602
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dkube