Merge pull request #2114 from rayandas/prepare-charts-for-2.7.0

Update charts to Rancher v2.7
pull/2116/head
Jono Mercier 2022-09-27 11:42:28 -07:00 committed by GitHub
commit 2466942ff6
133 changed files with 4431 additions and 19 deletions

@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-aks-operator-crd
apiVersion: v2
appVersion: 1.0.7-rc1
description: AKS Operator CustomResourceDefinitions
name: rancher-aks-operator-crd
version: 101.0.0+up1.0.7-rc1

@ -0,0 +1,178 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
name: aksclusterconfigs.aks.cattle.io
spec:
group: aks.cattle.io
names:
kind: AKSClusterConfig
plural: aksclusterconfigs
shortNames:
- akscc
singular: aksclusterconfig
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
authBaseUrl:
nullable: true
type: string
authorizedIpRanges:
items:
nullable: true
type: string
nullable: true
type: array
azureCredentialSecret:
nullable: true
type: string
baseUrl:
nullable: true
type: string
clusterName:
nullable: true
type: string
dnsPrefix:
nullable: true
type: string
dnsServiceIp:
nullable: true
type: string
dockerBridgeCidr:
nullable: true
type: string
httpApplicationRouting:
nullable: true
type: boolean
imported:
type: boolean
kubernetesVersion:
nullable: true
type: string
linuxAdminUsername:
nullable: true
type: string
loadBalancerSku:
nullable: true
type: string
logAnalyticsWorkspaceGroup:
nullable: true
type: string
logAnalyticsWorkspaceName:
nullable: true
type: string
monitoring:
nullable: true
type: boolean
networkPlugin:
nullable: true
type: string
networkPolicy:
nullable: true
type: string
nodePools:
items:
properties:
availabilityZones:
items:
nullable: true
type: string
nullable: true
type: array
count:
nullable: true
type: integer
enableAutoScaling:
nullable: true
type: boolean
maxCount:
nullable: true
type: integer
maxPods:
nullable: true
type: integer
minCount:
nullable: true
type: integer
mode:
nullable: true
type: string
name:
nullable: true
type: string
orchestratorVersion:
nullable: true
type: string
osDiskSizeGB:
nullable: true
type: integer
osDiskType:
nullable: true
type: string
osType:
nullable: true
type: string
vmSize:
nullable: true
type: string
type: object
nullable: true
type: array
podCidr:
nullable: true
type: string
privateCluster:
nullable: true
type: boolean
resourceGroup:
nullable: true
type: string
resourceLocation:
nullable: true
type: string
serviceCidr:
nullable: true
type: string
sshPublicKey:
nullable: true
type: string
subnet:
nullable: true
type: string
tags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
virtualNetwork:
nullable: true
type: string
virtualNetworkResourceGroup:
nullable: true
type: string
type: object
status:
properties:
failureMessage:
nullable: true
type: string
phase:
nullable: true
type: string
rbacEnabled:
nullable: true
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-aks-operator-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.25.0-0'
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: aksclusterconfigs.aks.cattle.io/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-aks-operator
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.0.7-rc1
description: A Helm chart for provisioning AKS clusters
home: https://github.com/rancher/aks-operator
name: rancher-aks-operator
sources:
- https://github.com/rancher/aks-operator
version: 101.0.0+up1.0.7-rc1

@ -0,0 +1,4 @@
You have deployed the Rancher AKS operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions AKS clusters
from AKSClusterConfig CRs.

@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aks-operator
namespace: cattle-system
rules:
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list', 'create', 'watch', 'update']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs']
verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs/status']
verbs: ['update']

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aks-operator
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aks-operator
subjects:
- kind: ServiceAccount
name: aks-operator
namespace: cattle-system

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aks-config-operator
namespace: cattle-system
spec:
replicas: 1
selector:
matchLabels:
ke.cattle.io/operator: aks
template:
metadata:
labels:
ke.cattle.io/operator: aks
spec:
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
serviceAccountName: aks-operator
securityContext:
fsGroup: 1007
runAsUser: 1007
containers:
- name: aks-operator
image: {{ template "system_default_registry" . }}{{ .Values.aksOperator.image.repository }}:{{ .Values.aksOperator.image.tag }}
imagePullPolicy: IfNotPresent
env:
- name: HTTP_PROXY
value: {{ .Values.httpProxy }}
- name: HTTPS_PROXY
value: {{ .Values.httpsProxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.additionalTrustedCAs }}
# aks-operator mounts the additional CAs in two places:
volumeMounts:
# This directory is owned by the aks-operator user so c_rehash works here.
- mountPath: /etc/rancher/ssl/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
# This directory is root-owned so c_rehash doesn't work here,
# but the cert is here in case update-ca-certificates is called in the future or by the OS.
- mountPath: /etc/pki/trust/anchors/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 0400
secretName: tls-ca-additional
{{- end }}

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: cattle-system
name: aks-operator

@ -0,0 +1,21 @@
global:
cattle:
systemDefaultRegistry: ""
aksOperator:
image:
repository: rancher/aks-operator
tag: v1.0.7-rc1
httpProxy: ""
httpsProxy: ""
noProxy: ""
additionalTrustedCAs: false
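# When true, the deployment mounts a Secret named "tls-ca-additional" (key
# "ca-additional.pem") from the cattle-system namespace; create it beforehand, e.g.:
#   kubectl -n cattle-system create secret generic tls-ca-additional \
#     --from-file=ca-additional.pem=./additional-ca.pem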
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []

@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-eks-operator-crd
apiVersion: v2
appVersion: 1.1.5-rc1
description: EKS Operator CustomResourceDefinitions
name: rancher-eks-operator-crd
version: 101.0.0+up1.1.5-rc1

@ -0,0 +1,217 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
name: eksclusterconfigs.eks.cattle.io
spec:
group: eks.cattle.io
names:
kind: EKSClusterConfig
plural: eksclusterconfigs
shortNames:
- ekscc
singular: eksclusterconfig
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
amazonCredentialSecret:
nullable: true
type: string
displayName:
nullable: true
type: string
imported:
type: boolean
kmsKey:
nullable: true
type: string
kubernetesVersion:
nullable: true
type: string
loggingTypes:
items:
nullable: true
type: string
nullable: true
type: array
nodeGroups:
items:
properties:
desiredSize:
nullable: true
type: integer
diskSize:
nullable: true
type: integer
ec2SshKey:
nullable: true
type: string
gpu:
nullable: true
type: boolean
imageId:
nullable: true
type: string
instanceType:
nullable: true
type: string
labels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
launchTemplate:
nullable: true
properties:
id:
nullable: true
type: string
name:
nullable: true
type: string
version:
nullable: true
type: integer
type: object
maxSize:
nullable: true
type: integer
minSize:
nullable: true
type: integer
nodegroupName:
nullable: true
type: string
requestSpotInstances:
nullable: true
type: boolean
resourceTags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
spotInstanceTypes:
items:
nullable: true
type: string
nullable: true
type: array
subnets:
items:
nullable: true
type: string
nullable: true
type: array
tags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
userData:
nullable: true
type: string
version:
nullable: true
type: string
required:
- nodegroupName
type: object
nullable: true
type: array
privateAccess:
nullable: true
type: boolean
publicAccess:
nullable: true
type: boolean
publicAccessSources:
items:
nullable: true
type: string
nullable: true
type: array
region:
nullable: true
type: string
secretsEncryption:
nullable: true
type: boolean
securityGroups:
items:
nullable: true
type: string
nullable: true
type: array
serviceRole:
nullable: true
type: string
subnets:
items:
nullable: true
type: string
nullable: true
type: array
tags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
type: object
status:
properties:
failureMessage:
nullable: true
type: string
managedLaunchTemplateID:
nullable: true
type: string
managedLaunchTemplateVersions:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
networkFieldsSource:
nullable: true
type: string
phase:
nullable: true
type: string
securityGroups:
items:
nullable: true
type: string
nullable: true
type: array
subnets:
items:
nullable: true
type: string
nullable: true
type: array
templateVersionsToDelete:
items:
nullable: true
type: string
nullable: true
type: array
virtualNetwork:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-eks-operator-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.25.0-0'
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: eksclusterconfigs.eks.cattle.io/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-eks-operator
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.1.5-rc1
description: A Helm chart for provisioning EKS clusters
home: https://github.com/rancher/eks-operator
name: rancher-eks-operator
sources:
- https://github.com/rancher/eks-operator
version: 101.0.0+up1.1.5-rc1

@ -0,0 +1,4 @@
You have deployed the Rancher EKS operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions EKS clusters
from EKSClusterConfig CRs.

@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: eks-operator
namespace: cattle-system
rules:
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list', 'create', 'watch']
- apiGroups: ['eks.cattle.io']
resources: ['eksclusterconfigs']
verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['eks.cattle.io']
resources: ['eksclusterconfigs/status']
verbs: ['update']

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: eks-operator
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: eks-operator
subjects:
- kind: ServiceAccount
name: eks-operator
namespace: cattle-system

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: eks-config-operator
namespace: cattle-system
spec:
replicas: 1
selector:
matchLabels:
ke.cattle.io/operator: eks
template:
metadata:
labels:
ke.cattle.io/operator: eks
spec:
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
serviceAccountName: eks-operator
securityContext:
fsGroup: 1007
runAsUser: 1007
containers:
- name: eks-operator
image: {{ template "system_default_registry" . }}{{ .Values.eksOperator.image.repository }}:{{ .Values.eksOperator.image.tag }}
imagePullPolicy: IfNotPresent
env:
- name: HTTP_PROXY
value: {{ .Values.httpProxy }}
- name: HTTPS_PROXY
value: {{ .Values.httpsProxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.additionalTrustedCAs }}
# eks-operator mounts the additional CAs in two places:
volumeMounts:
# This directory is owned by the eks-operator user so c_rehash works here.
- mountPath: /etc/rancher/ssl/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
# This directory is root-owned so c_rehash doesn't work here,
# but the cert is here in case update-ca-certificates is called in the future or by the OS.
- mountPath: /etc/pki/trust/anchors/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 0400
secretName: tls-ca-additional
{{- end }}

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: cattle-system
name: eks-operator

@ -0,0 +1,20 @@
global:
cattle:
systemDefaultRegistry: ""
eksOperator:
image:
repository: rancher/eks-operator
tag: v1.1.5-rc1
httpProxy: ""
httpsProxy: ""
noProxy: ""
additionalTrustedCAs: false
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []

@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-gke-operator-crd
apiVersion: v2
appVersion: 1.1.4
description: GKE Operator CustomResourceDefinitions
name: rancher-gke-operator-crd
version: 101.0.0+up1.1.4

@ -0,0 +1,250 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
name: gkeclusterconfigs.gke.cattle.io
spec:
group: gke.cattle.io
names:
kind: GKEClusterConfig
plural: gkeclusterconfigs
shortNames:
- gkecc
singular: gkeclusterconfig
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
clusterAddons:
nullable: true
properties:
horizontalPodAutoscaling:
type: boolean
httpLoadBalancing:
type: boolean
networkPolicyConfig:
type: boolean
type: object
clusterIpv4Cidr:
nullable: true
type: string
clusterName:
nullable: true
type: string
description:
nullable: true
type: string
enableKubernetesAlpha:
nullable: true
type: boolean
googleCredentialSecret:
nullable: true
type: string
imported:
type: boolean
ipAllocationPolicy:
nullable: true
properties:
clusterIpv4CidrBlock:
nullable: true
type: string
clusterSecondaryRangeName:
nullable: true
type: string
createSubnetwork:
type: boolean
nodeIpv4CidrBlock:
nullable: true
type: string
servicesIpv4CidrBlock:
nullable: true
type: string
servicesSecondaryRangeName:
nullable: true
type: string
subnetworkName:
nullable: true
type: string
useIpAliases:
type: boolean
type: object
kubernetesVersion:
nullable: true
type: string
labels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
locations:
items:
nullable: true
type: string
nullable: true
type: array
loggingService:
nullable: true
type: string
maintenanceWindow:
nullable: true
type: string
masterAuthorizedNetworks:
nullable: true
properties:
cidrBlocks:
items:
properties:
cidrBlock:
nullable: true
type: string
displayName:
nullable: true
type: string
type: object
nullable: true
type: array
enabled:
type: boolean
type: object
monitoringService:
nullable: true
type: string
network:
nullable: true
type: string
networkPolicyEnabled:
nullable: true
type: boolean
nodePools:
items:
properties:
autoscaling:
nullable: true
properties:
enabled:
type: boolean
maxNodeCount:
type: integer
minNodeCount:
type: integer
type: object
config:
nullable: true
properties:
diskSizeGb:
type: integer
diskType:
nullable: true
type: string
imageType:
nullable: true
type: string
labels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
localSsdCount:
type: integer
machineType:
nullable: true
type: string
oauthScopes:
items:
nullable: true
type: string
nullable: true
type: array
preemptible:
type: boolean
tags:
items:
nullable: true
type: string
nullable: true
type: array
taints:
items:
properties:
effect:
nullable: true
type: string
key:
nullable: true
type: string
value:
nullable: true
type: string
type: object
nullable: true
type: array
type: object
initialNodeCount:
nullable: true
type: integer
management:
nullable: true
properties:
autoRepair:
type: boolean
autoUpgrade:
type: boolean
type: object
maxPodsConstraint:
nullable: true
type: integer
name:
nullable: true
type: string
version:
nullable: true
type: string
type: object
nullable: true
type: array
privateClusterConfig:
nullable: true
properties:
enablePrivateEndpoint:
type: boolean
enablePrivateNodes:
type: boolean
masterIpv4CidrBlock:
nullable: true
type: string
type: object
projectID:
nullable: true
type: string
region:
nullable: true
type: string
subnetwork:
nullable: true
type: string
zone:
nullable: true
type: string
type: object
status:
properties:
failureMessage:
nullable: true
type: string
phase:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-gke-operator-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.25.0-0'
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: gkeclusterconfigs.gke.cattle.io/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-gke-operator
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.1.4
description: A Helm chart for provisioning GKE clusters
home: https://github.com/rancher/gke-operator
name: rancher-gke-operator
sources:
- https://github.com/rancher/gke-operator
version: 101.0.0+up1.1.4

@ -0,0 +1,4 @@
You have deployed the Rancher GKE operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions GKE clusters
from GKEClusterConfig CRs.

@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: gke-operator
namespace: cattle-system
rules:
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list', 'create', 'watch']
- apiGroups: ['gke.cattle.io']
resources: ['gkeclusterconfigs']
verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['gke.cattle.io']
resources: ['gkeclusterconfigs/status']
verbs: ['update']

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gke-operator
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gke-operator
subjects:
- kind: ServiceAccount
name: gke-operator
namespace: cattle-system

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gke-config-operator
namespace: cattle-system
spec:
replicas: 1
selector:
matchLabels:
ke.cattle.io/operator: gke
template:
metadata:
labels:
ke.cattle.io/operator: gke
spec:
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
serviceAccountName: gke-operator
securityContext:
fsGroup: 1007
runAsUser: 1007
containers:
- name: rancher-gke-operator
image: {{ template "system_default_registry" . }}{{ .Values.gkeOperator.image.repository }}:{{ .Values.gkeOperator.image.tag }}
imagePullPolicy: IfNotPresent
env:
- name: HTTP_PROXY
value: {{ .Values.httpProxy }}
- name: HTTPS_PROXY
value: {{ .Values.httpsProxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.additionalTrustedCAs }}
# gke-operator mounts the additional CAs in two places:
volumeMounts:
# This directory is owned by the gke-operator user so c_rehash works here.
- mountPath: /etc/rancher/ssl/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
# This directory is root-owned so c_rehash doesn't work here,
# but the cert is here in case update-ca-certificates is called in the future or by the OS.
- mountPath: /etc/pki/trust/anchors/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 0400
secretName: tls-ca-additional
{{- end }}

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: cattle-system
name: gke-operator

@ -0,0 +1,20 @@
global:
cattle:
systemDefaultRegistry: ""
gkeOperator:
image:
repository: rancher/gke-operator
tag: v1.1.4
httpProxy: ""
httpsProxy: ""
noProxy: ""
additionalTrustedCAs: false
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: vSphere CPI
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.25.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: vsphere-cpi
apiVersion: v1
appVersion: 1.2.2
description: vSphere Cloud Provider Interface (CPI)
icon: https://charts.rancher.io/assets/logos/vsphere-cpi.svg
keywords:
- infrastructure
maintainers:
- email: jamie.phillips@suse.com
name: Rancher
name: rancher-vsphere-cpi
sources:
- https://github.com/kubernetes/cloud-provider-vsphere
version: 101.0.0+up1.2.2

@ -0,0 +1,59 @@
# vSphere Cloud Provider Interface (CPI)
[vSphere Cloud Provider Interface (CPI)](https://github.com/kubernetes/cloud-provider-vsphere) is responsible for running all of the platform-specific control loops that were previously run in core Kubernetes components such as the kube-controller-manager (KCM) and the kubelet, but have been moved out-of-tree so that cloud and infrastructure providers can implement integrations that are developed, built, and released independently of the Kubernetes core. The official documentation and tutorials can be found [here](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html).
**This chart requires being deployed into the `kube-system` namespace.**
## Prerequisites
- vSphere 6.7 U3+
- Kubernetes v1.14+
- A Secret on your Kubernetes cluster that contains vSphere credentials (Refer to `README` or `Detailed Descriptions`)
## Installation
This chart requires a Secret in your Kubernetes cluster that contains the server URL and credentials to connect to the vCenter. You can have the chart generate it for you, or create it yourself and provide the name of the Secret during installation.
<span style="color:orange">Warning</span>: When the option to generate the Secret is enabled, the credentials are visible in the API to authorized users. If you create the Secret yourself, they will not be visible.
You can create a Secret in one of the following ways:
### <B>Option 1</b>: Create a Secret using the Rancher UI
Go to your cluster's project (the same project where you will install the chart) > Resources > Secrets > Add Secret.
```yaml
# Example of data required in the Secret
<host-1>.username: <username>
<host-1>.password: <password>
```
### <B>Option 2</b>: Create a Secret using kubectl
Replace placeholders with actual values, and execute the following:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: <secret-name>
namespace: <charts-namespace>
data:
<host-1>.username: <base64encoded-username>
<host-1>.password: <base64encoded-password>
EOF
```
More information on managing Secrets using kubectl [here](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl/).
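Alternatively, `kubectl create secret generic` base64-encodes the literal values for you. A minimal sketch, assuming the chart's default Secret name and the required `kube-system` namespace; the host prefix is a placeholder and must match the chart's `vCenter.host` value, since the generated Secret names its keys the same way:
```bash
kubectl -n kube-system create secret generic vsphere-cpi-creds \
  --from-literal='vcenter.example.com.username=<username>' \
  --from-literal='vcenter.example.com.password=<password>'
```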
## Migration
If using this chart to migrate volumes provisioned by the in-tree provider to the out-of-tree CPI + CSI, you need to taint all nodes with the following:
```
node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule
```
To perform this operation on all nodes in your cluster, the following script has been provided for your convenience:
```bash
# Note: Since this script uses kubectl, ensure that you run `export KUBECONFIG=<path-to-kubeconfig-for-cluster>` before running this script
for node in $(kubectl get nodes | awk '{print $1}' | tail -n +2); do
kubectl taint node $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule
done
```
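The CPI removes this taint from each node once it has initialized it. As a quick check that no nodes are left uninitialized, you can list any remaining taints (a sketch using kubectl custom columns):
```bash
kubectl get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*].key'
```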

@ -0,0 +1,11 @@
# vSphere Cloud Provider Interface (CPI)
[vSphere Cloud Provider Interface (CPI)](https://github.com/kubernetes/cloud-provider-vsphere) is responsible for running all of the platform-specific control loops that were previously run in core Kubernetes components such as the kube-controller-manager (KCM) and the kubelet, but have been moved out-of-tree so that cloud and infrastructure providers can implement integrations that are developed, built, and released independently of the Kubernetes core. The official documentation and tutorials can be found [here](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html).
**This chart requires being deployed into the `kube-system` namespace.**
## Prerequisites
- vSphere 6.7 U3+ or vSphere 7.0+
- Kubernetes v1.19+
- A Secret on your Kubernetes cluster that contains vSphere credentials (Refer to `README` or `Detailed Descriptions`)

@ -0,0 +1,42 @@
questions:
- variable: vCenter.host
label: vCenter Host
description: IP address or FQDN of the vCenter
type: string
required: true
group: Configuration
- variable: vCenter.datacenters
description: Comma-separated list of paths to data centers, e.g. "<dc1-path>, <dc2-path>, ..."
label: Data Centers
type: string
required: true
group: Configuration
- variable: vCenter.credentialsSecret.generate
label: Generate Credentials Secret
description: Generates a secret with the vSphere credentials (If the option to generate it is enabled, credentials will be visible in the API to authorized users)
type: boolean
default: true
required: true
group: Configuration
show_subquestion_if: true
subquestions:
- variable: vCenter.username
label: Username
description: Username for vCenter
type: string
group: Configuration
- variable: vCenter.password
label: Password
description: Password for vCenter
type: password
group: Configuration
- variable: vCenter.credentialsSecret.name
label: Credentials Secret Name
description: Name of the secret with the vSphere credentials (Will not be visible in the API. More info in the README)
default: "vsphere-cpi-creds"
type: string
group: Configuration
show_if: "vCenter.credentialsSecret.generate=false"

@ -0,0 +1,32 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
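{{/*
applyVersionOverrides mutates .Values at render time: every entry in
.Values.versionOverrides whose semver constraint matches the cluster's
Kubernetes version has its values merged over the chart defaults, with
later matches overwriting earlier ones.
*/}}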
{{- define "applyVersionOverrides" -}}
{{- $overrides := dict -}}
{{- range $override := .Values.versionOverrides -}}
{{- if semverCompare $override.constraint $.Capabilities.KubeVersion.Version -}}
{{- $_ := mergeOverwrite $overrides $override.values -}}
{{- end -}}
{{- end -}}
{{- $_ := mergeOverwrite .Values $overrides -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: vsphere-cloud-config
labels:
vsphere-cpi-infra: config
component: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
data:
vsphere.conf: |
[Global]
secret-name = {{ .Values.vCenter.credentialsSecret.name | quote }}
secret-namespace = {{ .Release.Namespace | quote }}
port = {{ .Values.vCenter.port | quote }}
insecure-flag = {{ .Values.vCenter.insecureFlag | quote }}
[VirtualCenter {{ .Values.vCenter.host | quote }}]
datacenters = {{ .Values.vCenter.datacenters | quote }}
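# With the chart's default values and a hypothetical host, the rendered
# vsphere.conf would look like:
#   [Global]
#   secret-name = "vsphere-cpi-creds"
#   secret-namespace = "kube-system"
#   port = "443"
#   insecure-flag = "1"
#   [VirtualCenter "vcenter.example.com"]
#   datacenters = "dc-1"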

@ -0,0 +1,101 @@
{{- template "applyVersionOverrides" . -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ .Chart.Name }}-cloud-controller-manager
labels:
component: {{ .Chart.Name }}-cloud-controller-manager
tier: control-plane
namespace: {{ .Release.Namespace }}
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
selector:
matchLabels:
name: {{ .Chart.Name }}-cloud-controller-manager
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: {{ .Chart.Name }}-cloud-controller-manager
component: {{ .Chart.Name }}-cloud-controller-manager
tier: control-plane
spec:
{{- if .Values.cloudControllerManager.nodeSelector }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- with .Values.cloudControllerManager.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
# RKE node selector label
- key: node-role.kubernetes.io/controlplane
operator: In
values:
- "true"
- matchExpressions:
# RKE2 node selector label
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- "true"
- matchExpressions:
- key: kubernetes.io/os
operator: NotIn
values:
- "windows"
{{- end }}
{{- if .Values.cloudControllerManager.tolerations }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- with .Values.cloudControllerManager.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
operator: Exists
- key: node.kubernetes.io/not-ready
effect: NoSchedule
operator: Exists
# Rancher-specific change: these tolerations are added to account for RKE1 and RKE2 taints
- key: node-role.kubernetes.io/controlplane
effect: NoSchedule
value: "true"
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/etcd
effect: NoExecute
operator: Exists
{{- end }}
securityContext:
runAsUser: 1001
serviceAccountName: {{ .Chart.Name }}-cloud-controller-manager
containers:
- name: {{ .Chart.Name }}-cloud-controller-manager
image: {{ template "system_default_registry" . }}{{ .Values.cloudControllerManager.repository }}:{{ .Values.cloudControllerManager.tag }}
args:
- --cloud-provider=vsphere
- --v=2
- --cloud-config=/etc/cloud/vsphere.conf
volumeMounts:
- mountPath: /etc/cloud
name: vsphere-config-volume
readOnly: true
resources:
requests:
cpu: 200m
hostNetwork: true
volumes:
- name: vsphere-config-volume
configMap:
name: vsphere-cloud-config

@ -0,0 +1,43 @@
{{- if .Values.cloudControllerManager.rbac.enabled -}}
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: servicecatalog.k8s.io:apiserver-authentication-reader
labels:
vsphere-cpi-infra: role-binding
component: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
- apiGroup: ""
kind: User
name: {{ .Chart.Name }}-cloud-controller-manager
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:{{ .Chart.Name }}-cloud-controller-manager
labels:
vsphere-cpi-infra: cluster-role-binding
component: {{ .Chart.Name }}-cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:{{ .Chart.Name }}-cloud-controller-manager
subjects:
- kind: ServiceAccount
name: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
- kind: User
name: {{ .Chart.Name }}-cloud-controller-manager
{{- end -}}

@ -0,0 +1,92 @@
{{- if .Values.cloudControllerManager.rbac.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:{{ .Chart.Name }}-cloud-controller-manager
labels:
vsphere-cpi-infra: role
component: {{ .Chart.Name }}-cloud-controller-manager
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- "*"
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- list
- watch
- update
{{- end -}}

@ -0,0 +1,13 @@
{{- if .Values.vCenter.credentialsSecret.generate -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.vCenter.credentialsSecret.name }}
labels:
vsphere-cpi-infra: secret
component: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
data:
{{ .Values.vCenter.host }}.username: {{ .Values.vCenter.username | b64enc | quote }}
{{ .Values.vCenter.host }}.password: {{ .Values.vCenter.password | b64enc | quote }}
{{- end -}}

@ -0,0 +1,10 @@
{{- if .Values.cloudControllerManager.rbac.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Chart.Name }}-cloud-controller-manager
labels:
vsphere-cpi-infra: service-account
component: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
{{- end -}}

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
component: {{ .Chart.Name }}-cloud-controller-manager
name: {{ .Chart.Name }}-cloud-controller-manager
namespace: {{ .Release.Namespace }}
spec:
type: NodePort
ports:
- port: 43001
protocol: TCP
targetPort: 43001
selector:
component: {{ .Chart.Name }}-cloud-controller-manager

@ -0,0 +1,66 @@
vCenter:
host: ""
port: 443
insecureFlag: "1"
datacenters: ""
username: ""
password: ""
credentialsSecret:
name: "vsphere-cpi-creds"
generate: true
# A list of Semver constraint strings (defined by https://github.com/Masterminds/semver) and values.yaml overrides.
#
# For each entry in versionOverrides, this chart checks whether the current Kubernetes cluster's version
# matches the entry's semver constraint.
#
# On seeing a match, the default value for each values.yaml field overridden will be updated with the new value.
#
# If multiple matches are encountered (due to overlapping semver ranges), the matches will be applied in order.
#
# Notes:
# - On running helm template, Helm generally assumes the kubeVersion is v1.20.0
# - On running helm install --dry-run, the correct kubeVersion should be chosen.
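# - To exercise a specific constraint while templating, recent Helm 3 releases
#   accept an explicit version, e.g.: helm template . --kube-version v1.23.4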
versionOverrides:
- constraint: ">= 1.23 < 1.25"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.23.0
- constraint: "~ 1.22"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.22.6
- constraint: "~ 1.21"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.21.3
- constraint: "~ 1.20"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.20.1
- constraint: "~ 1.19"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.19.0
- constraint: "~ 1.18"
values:
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.18.0
cloudControllerManager:
repository: rancher/mirrored-cloud-provider-vsphere-cpi-release-manager
tag: v1.22.6
nodeSelector: {}
tolerations: []
rbac:
enabled: true
global:
cattle:
systemDefaultRegistry: ""

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: vSphere CSI
catalog.cattle.io/kube-version: '>= 1.20.0-0 < 1.24.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux,windows
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: vsphere-csi
apiVersion: v1
appVersion: 2.5.1-rancher1
description: vSphere Container Storage Interface (CSI)
icon: https://charts.rancher.io/assets/logos/vsphere-csi.svg
keywords:
- infrastructure
maintainers:
- email: jamie.phillips@suse.com
name: Rancher
name: rancher-vsphere-csi
sources:
- https://github.com/kubernetes-sigs/vsphere-csi-driver
version: 101.0.0+up2.5.1-rancher1

@ -0,0 +1,73 @@
# vSphere Container Storage Interface (CSI)
[vSphere Container Storage Interface (CSI)](https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/release-2.1/manifests/v2.1.0/vsphere-7.0u1/) is a specification designed to enable persistent storage volume management on Container Orchestrators (COs) such as Kubernetes. The specification allows storage systems to integrate with containerized workloads running on Kubernetes. Using CSI, storage providers, such as VMware, can write and deploy plugins for storage systems in Kubernetes without needing to modify any core Kubernetes code.
CSI allows volume plugins to be installed on Kubernetes clusters as extensions. Once a CSI-compatible volume driver is deployed on a Kubernetes cluster, users can use CSI to provision, attach, mount, and format the volumes exposed by the driver.
The CSI driver for vSphere is `csi.vsphere.vmware.com`.
## Prerequisites
- vSphere 6.7 U3+
- Kubernetes v1.20+
- Out-of-tree vSphere Cloud Provider Interface (CPI)
- A Secret on your Kubernetes cluster that contains vSphere CSI configuration and credentials
## Installation
This chart requires a Secret in your Kubernetes cluster that contains the CSI configuration and credentials to connect to the vCenter. You can have the chart generate it for you, or create it yourself and provide the name of the Secret during installation.
<span style="color:orange">Warning</span>: When the option to generate the Secret is enabled, the credentials are visible in the API to authorized users. If you create the Secret yourself, they will not be visible.
You can create a Secret in one of the following ways:
### <B>Option 1</b>: Create a Secret using the Rancher UI
Go to your cluster's project (the same project where you will install the chart) > Resources > Secrets > Add Secret.
```yaml
# Example of data required in the Secret
# The csi-vsphere.conf key name is required, otherwise the installation will fail
csi-vsphere.conf: |
[Global]
cluster-id = "<cluster-id>"
user = "<username>"
password = "<password>"
port = "<port>"
insecure-flag = "<insecure-flag>"
[VirtualCenter "<host>"]
datacenters = "<dc-1>, <dc-2>, ..."
```
More information on CSI vSphere configuration [here](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/installation.html#create_k8s_secret).
### <B>Option 2</b>: Create a Secret using kubectl
Replace placeholders with actual values, and execute the following:
```bash
# The csi-vsphere.conf key name is required, otherwise the installation will fail
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: <secret-name>
namespace: <charts-namespace>
stringData:
csi-vsphere.conf: |
[Global]
cluster-id = "<cluster-id>"
user = "<username>"
password = "<password>"
port = "<port>"
insecure-flag = "<insecure-flag>"
[VirtualCenter "<host>"]
datacenters = "<dc-1>, <dc-2>, ..."
EOF
```
More information on managing Secrets using kubectl [here](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl/).
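Alternatively, if the configuration above is saved to a local `csi-vsphere.conf` file, the Secret can be created directly from it. A minimal sketch, assuming the chart's required `kube-system` namespace; `<secret-name>` must match `vCenter.configSecret.name`:
```bash
# The csi-vsphere.conf key name is required, otherwise the installation will fail
kubectl -n kube-system create secret generic <secret-name> \
  --from-file=csi-vsphere.conf=./csi-vsphere.conf
```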
## Migration
The CSI migration feature is only available for vSphere 7.0 U1 and later.

@ -0,0 +1,14 @@
# vSphere Container Storage Interface (CSI)
[vSphere Container Storage Interface (CSI)](https://github.com/kubernetes-sigs/vsphere-csi-driver) is a specification designed to enable persistent storage volume management on Container Orchestrators (COs) such as Kubernetes. The specification allows storage systems to integrate with containerized workloads running on Kubernetes. Using CSI, storage providers, such as VMware, can write and deploy plugins for storage systems in Kubernetes without needing to modify any core Kubernetes code.
CSI allows volume plugins to be installed on Kubernetes clusters as extensions. Once a CSI-compatible volume driver is deployed on a Kubernetes cluster, users can use CSI to provision, attach, mount, and format the volumes exposed by the driver.
The CSI driver for vSphere is `csi.vsphere.vmware.com`.
## Prerequisites
- vSphere 6.7 U3+
- Kubernetes v1.20+
- Out-of-tree vSphere Cloud Provider Interface (CPI)
- A Secret on your Kubernetes cluster that contains vSphere CSI configuration and credentials (Refer to `README` or `Detailed Descriptions`)

@ -0,0 +1,141 @@
questions:
- variable: vCenter.configSecret.generate
label: Generate CSI vSphere Config Secret
description: Generates a Secret that contains a CSI vSphere config and credentials (If the option to generate it is enabled, credentials will be visible in the API to authorized users)
type: boolean
default: true
required: true
group: vCenter Configuration
show_subquestion_if: true
subquestions:
- variable: vCenter.host
label: vCenter Host
description: IP address or FQDN of the vCenter
type: string
- variable: vCenter.datacenters
description: Comma-separated list of paths to data centers, e.g. "<dc1-path>, <dc2-path>, ..."
label: Data Centers
type: string
- variable: vCenter.username
label: Username
description: Username for vCenter
type: string
- variable: vCenter.password
label: Password
description: Password for vCenter
type: password
- variable: vCenter.configSecret.name
label: CSI vSphere Config Secret Name
description: Name of the Secret that contains a CSI vSphere config and credentials (Will not be visible in the API. More info in the README)
type: string
group: vCenter Configuration
show_if: "vCenter.configSecret.generate=false"
- variable: csiMigration.enabled
label: Enable CSI Migration
description: Enable migration of volumes provisioned by in-tree vSphere provider to CSI (Available for vSphere 7.0 U1+ only)
type: boolean
default: false
group: Driver Configuration
- variable: csiAuthCheck.enabled
label: Enable authorization checks on operations involving datastores
type: boolean
default: false
group: Driver Configuration
- variable: onlineVolumeExtend.enabled
label: Enable Online Volume Extend
description: Enable expansion of PVCs that are in use by a Pod or mounted in a Node (Available for vSphere 7.0 U2+ only)
type: boolean
default: false
group: Driver Configuration
- variable: triggerCsiFullsync.enabled
label: Enable CSI Full Sync
description: Keeps CNS up to date with Kubernetes volume metadata (such as PVs, PVCs, pods, and so on)
type: boolean
default: false
group: Driver Configuration
- variable: asyncQueryVolume.enabled
label: Enable Async Query Volume
description: Improves retrieval of volume information
type: boolean
default: false
group: Driver Configuration
- variable: improvedCsiIdempotency.enabled
label: Enable Improved CSI Idempotency
description: Enhances driver to ensure volume operations are idempotent
type: boolean
default: false
group: Driver Configuration
- variable: improvedVolumeTopology.enabled
label: Enable Improved Volume Topology
description: Allows using the topology feature without the need to mount vSphere credentials in the CSI node daemonset
type: boolean
default: false
group: Driver Configuration
- variable: csiWindowsSupport.enabled
label: Enable CSI Windows Support
description: Enables Windows support.
type: boolean
default: false
group: Driver Configuration
- variable: csiController.csiResizer.enabled
label: Enable CSI Volume Resizer
description: This feature is available for vSphere 7.0 U1+ only
type: boolean
default: false
group: Storage
- variable: storageClass.enabled
default: true
label: Create Storage Class
description: Create a storageClass with the vSphere CSI provisioner
type: boolean
required: true
show_subquestion_if: true
group: Storage
subquestions:
- variable: storageClass.name
label: Storage Class Name
default: "vsphere-csi-sc"
type: string
- variable: storageClass.isDefault
label: Default Storage Class
description: Set the Storage Class as the default
default: true
type: boolean
- variable: storageClass.allowVolumeExpansion
label: Allow Volume Expansion
description: Allows resizing the volume by editing the corresponding PVC object (Available for vSphere 7.0+ only)
default: false
type: boolean
- variable: storageClass.storagePolicyName
label: Storage Policy Name
description: Name of the Storage Policy created in vCenter
type: string
- variable: storageClass.datastoreURL
label: Data Store URL
description: URL of the data store to use for new volumes (If unspecified, any data store that matches the request will be selected).
type: string
- variable: csiNode.prefixPath
label: Prefix Path for `/var/lib/kubelet`
description: On some operating systems, including RancherOS, RKE prefixes `/var/lib/kubelet` with `/opt/rke`. Set the prefix for the location of `/var/lib/kubelet` here
type: string
default: ""
group: Node Configuration

@ -0,0 +1,32 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "applyVersionOverrides" -}}
{{- $overrides := dict -}}
{{- range $override := .Values.versionOverrides -}}
{{- if semverCompare $override.constraint $.Capabilities.KubeVersion.Version -}}
{{- $_ := mergeOverwrite $overrides $override.values -}}
{{- end -}}
{{- end -}}
{{- $_ := mergeOverwrite .Values $overrides -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,18 @@
# Source: https://github.com/kubernetes-sigs/vsphere-csi-driver
apiVersion: v1
data:
"csi-migration": {{ .Values.csiMigration.enabled | quote }}
"csi-auth-check": {{ .Values.csiAuthCheck.enabled | quote }}
"online-volume-extend": {{ .Values.onlineVolumeExtend.enabled | quote }}
"trigger-csi-fullsync": {{ .Values.triggerCsiFullsync.enabled | quote }}
"async-query-volume": {{ .Values.asyncQueryVolume.enabled | quote }}
"improved-csi-idempotency": {{ .Values.improvedCsiIdempotency.enabled | quote }}
"improved-volume-topology": {{ .Values.improvedVolumeTopology.enabled | quote }}
"csi-windows-support": {{ .Values.csiWindowsSupport.enabled | quote }}
"use-csinode-id": {{ .Values.useCsinodeId.enabled | quote }}
"pv-to-backingdiskobjectid-mapping": {{ .Values.pvToBackingdiskobjectidMapping.enabled | quote }}
"cnsmgr-suspend-create-volume": {{ .Values.cnsmgrSuspendCreateVolume.enabled | quote }}
kind: ConfigMap
metadata:
name: internal-feature-states.csi.vsphere.vmware.com
namespace: {{ .Release.Namespace }}

@ -0,0 +1,220 @@
{{- template "applyVersionOverrides" . -}}
kind: Deployment
apiVersion: apps/v1
metadata:
name: vsphere-csi-controller
namespace: {{ .Release.Namespace }}
spec:
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 0
selector:
matchLabels:
app: vsphere-csi-controller
template:
metadata:
labels:
app: vsphere-csi-controller
role: vsphere-csi
spec:
serviceAccountName: vsphere-csi-controller
{{- if .Values.csiController.nodeSelector }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- with .Values.csiController.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
# RKE node selector label
- key: node-role.kubernetes.io/controlplane
operator: In
values:
- "true"
- matchExpressions:
# RKE2 node selector label
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- "true"
- matchExpressions:
# Rancher node selector label
- key: kubernetes.io/os
operator: NotIn
values:
- "windows"
{{- end }}
{{- if .Values.csiController.tolerations }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- with .Values.csiController.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
# Rancher-specific change: these tolerations are added to account for RKE1 and RKE2 taints
- key: node-role.kubernetes.io/controlplane
effect: NoSchedule
value: "true"
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/etcd
effect: NoExecute
operator: Exists
{{- end }}
dnsPolicy: "Default"
containers:
- name: csi-attacher
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.csiAttacher.repository }}:{{ .Values.csiController.image.csiAttacher.tag }}"
args:
- "--v=4"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{{- if .Values.csiController.csiResizer.enabled }}
- name: csi-resizer
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.csiResizer.repository }}:{{ .Values.csiController.image.csiResizer.tag }}"
args:
- "--v=4"
- "--timeout=300s"
- "--handle-volume-inuse-error=false"
- "--csi-address=$(ADDRESS)"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{{- end }}
- name: vsphere-csi-controller
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.repository }}:{{ .Values.csiController.image.tag }}"
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace=$(CSI_NAMESPACE)"
- "--use-gocsi=false"
imagePullPolicy: "Always"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: X_CSI_MODE
value: "controller"
- name: X_CSI_SPEC_DISABLE_LEN_CHECK
value: "true"
- name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT
value: 3m
- name: VSPHERE_CSI_CONFIG
value: "/etc/cloud/csi-vsphere.conf"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
- name: INCLUSTER_CLIENT_QPS
value: "100"
- name: INCLUSTER_CLIENT_BURST
value: "100"
- name: CSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- mountPath: /etc/cloud
name: vsphere-config-volume
readOnly: true
- mountPath: /csi
name: socket-dir
ports:
- name: healthz
containerPort: 9808
protocol: TCP
- name: prometheus
containerPort: 2112
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
- name: liveness-probe
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.livenessProbe.repository }}:{{ .Values.csiController.image.livenessProbe.tag }}"
args:
- "--v=4"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: vsphere-syncer
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.vsphereSyncer.repository }}:{{ .Values.csiController.image.vsphereSyncer.tag }}"
args:
- "--leader-election"
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace=$(CSI_NAMESPACE)"
imagePullPolicy: "Always"
ports:
- containerPort: 2113
name: prometheus
protocol: TCP
env:
- name: FULL_SYNC_INTERVAL_MINUTES
value: "30"
- name: VSPHERE_CSI_CONFIG
value: "/etc/cloud/csi-vsphere.conf"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
- name: INCLUSTER_CLIENT_QPS
value: "100"
- name: INCLUSTER_CLIENT_BURST
value: "100"
- name: CSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- mountPath: /etc/cloud
name: vsphere-config-volume
readOnly: true
- name: csi-provisioner
image: "{{ template "system_default_registry" . }}{{ .Values.csiController.image.csiProvisioner.repository }}:{{ .Values.csiController.image.csiProvisioner.tag }}"
args:
- "--v=4"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
- "--leader-election"
- "--default-fstype=ext4"
# needed only for topology aware setup
#- "--feature-gates=Topology=true"
#- "--strict-topology"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- name: vsphere-config-volume
secret:
secretName: {{ .Values.vCenter.configSecret.name }}
- name: socket-dir
emptyDir: {}

@ -0,0 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-controller-binding
subjects:
- kind: ServiceAccount
name: vsphere-csi-controller
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: vsphere-csi-controller-role
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,59 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-controller-role
rules:
- apiGroups: [""]
resources: ["nodes", "pods", "configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["cns.vmware.com"]
resources: ["triggercsifullsyncs"]
verbs: ["create", "get", "update", "watch", "list"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvspherevolumemigrations"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "create", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvolumeoperationrequests"]
verbs: ["create", "get", "list", "update", "delete"]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshots" ]
verbs: [ "get", "list" ]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotclasses" ]
verbs: [ "watch", "get", "list" ]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotcontents" ]
verbs: [ "create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotcontents/status" ]
verbs: [ "update", "patch" ]
- apiGroups: [ "cns.vmware.com" ]
resources: [ "csinodetopologies" ]
verbs: ["get", "update", "watch", "list"]

View File

@ -0,0 +1,5 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-controller
namespace: {{ .Release.Namespace }}

View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: vsphere-csi-controller
namespace: {{ .Release.Namespace }}
labels:
app: vsphere-csi-controller
spec:
ports:
- name: ctlr
port: 2112
targetPort: 2112
protocol: TCP
- name: syncer
port: 2113
targetPort: 2113
protocol: TCP
selector:
app: vsphere-csi-controller

View File

@ -0,0 +1,8 @@
# Source: https://github.com/kubernetes-sigs/vsphere-csi-driver
apiVersion: storage.k8s.io/v1 # For k8s 1.17 use storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: csi.vsphere.vmware.com
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -0,0 +1,178 @@
{{- template "applyVersionOverrides" . -}}
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: vsphere-csi-node
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: vsphere-csi-node
updateStrategy:
type: "RollingUpdate"
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
app: vsphere-csi-node
role: vsphere-csi
spec:
{{- if .Values.csiNode.nodeSelector }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- with .Values.csiNode.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
# Rancher node selector label
- key: kubernetes.io/os
operator: NotIn
values:
- "windows"
{{- end }}
{{- if .Values.csiNode.tolerations }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- with .Values.csiNode.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
# Rancher specific change: These tolerations are added to account for RKE1 and RKE2 taints
- key: node-role.kubernetes.io/controlplane
effect: NoSchedule
value: "true"
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/etcd
effect: NoExecute
operator: Exists
{{- end }}
serviceAccountName: vsphere-csi-node
hostNetwork: true
dnsPolicy: "ClusterFirstWithHostNet"
containers:
- name: node-driver-registrar
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.nodeDriverRegistrar.repository }}:{{ .Values.csiNode.image.nodeDriverRegistrar.tag }}"
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.csiNode.prefixPath }}/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
- --mode=kubelet-registration-probe
initialDelaySeconds: 3
- name: vsphere-csi-node
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.repository }}:{{ .Values.csiNode.image.tag }}"
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace=$(CSI_NAMESPACE)"
- "--use-gocsi=false"
imagePullPolicy: "Always"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: MAX_VOLUMES_PER_NODE
value: "59" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node.
- name: X_CSI_MODE
value: "node"
- name: X_CSI_SPEC_REQ_VALIDATION
value: "false"
- name: X_CSI_SPEC_DISABLE_LEN_CHECK
value: "true"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
- name: CSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODEGETINFO_WATCH_TIMEOUT_MINUTES
value: "1"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.csiNode.prefixPath }}/var/lib/kubelet
# needed so that any mounts setup inside this container are
# propagated back to the host machine.
mountPropagation: "Bidirectional"
- name: device-dir
mountPath: /dev
- name: blocks-dir
mountPath: /sys/block
- name: sys-devices-dir
mountPath: /sys/devices
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 5
periodSeconds: 5
failureThreshold: 3
- name: liveness-probe
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.livenessProbe.repository }}:{{ .Values.csiNode.image.livenessProbe.tag }}"
args:
- "--v=4"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: registration-dir
hostPath:
path: {{ .Values.csiNode.prefixPath }}/var/lib/kubelet/plugins_registry
type: Directory
- name: plugin-dir
hostPath:
path: {{ .Values.csiNode.prefixPath }}/var/lib/kubelet/plugins/csi.vsphere.vmware.com
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.csiNode.prefixPath }}/var/lib/kubelet
type: Directory
- name: device-dir
hostPath:
path: /dev
- name: blocks-dir
hostPath:
path: /sys/block
type: Directory
- name: sys-devices-dir
hostPath:
path: /sys/devices
type: Directory

View File

@ -0,0 +1,28 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-binding
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: vsphere-csi-node-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-cluster-role-binding
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: vsphere-csi-node-cluster-role
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,21 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-cluster-role
rules:
- apiGroups: ["cns.vmware.com"]
resources: ["csinodetopologies"]
verbs: ["create", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]

View File

@ -0,0 +1,5 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-node
namespace: {{ .Release.Namespace }}

View File

@ -0,0 +1,165 @@
{{- if .Values.csiWindowsSupport.enabled }}
{{- template "applyVersionOverrides" . -}}
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: vsphere-csi-node-windows
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: vsphere-csi-node-windows
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
app: vsphere-csi-node-windows
role: vsphere-csi-windows
spec:
nodeSelector:
kubernetes.io/os: windows
{{- if .Values.csiNode.tolerations }}
tolerations:
{{- with .Values.csiNode.tolerations }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- else }}
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
# Rancher specific change: These tolerations are added to account for RKE1 and RKE2 taints
- key: node-role.kubernetes.io/controlplane
effect: NoSchedule
value: "true"
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/etcd
effect: NoExecute
operator: Exists
{{- end }}
serviceAccountName: vsphere-csi-node
containers:
- name: node-driver-registrar
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.nodeDriverRegistrar.repository }}:{{ .Values.csiNode.image.nodeDriverRegistrar.tag }}"
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
- "--health-port=9809"
env:
- name: ADDRESS
value: 'unix://C:\\csi\\csi.sock'
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.csiNode.prefixPath }}'\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock'
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
livenessProbe:
exec:
command:
- /csi-node-driver-registrar.exe
- --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock
- --mode=kubelet-registration-probe
initialDelaySeconds: 3
- name: vsphere-csi-node
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.repository }}:{{ .Values.csiNode.image.tag }}"
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace=$(CSI_NAMESPACE)"
imagePullPolicy: "Always"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: 'unix://C:\\csi\\csi.sock'
- name: MAX_VOLUMES_PER_NODE
value: "0" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node.
- name: X_CSI_MODE
value: node
- name: X_CSI_SPEC_REQ_VALIDATION
value: 'false'
- name: X_CSI_SPEC_DISABLE_LEN_CHECK
value: "true"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
- name: X_CSI_LOG_LEVEL
value: DEBUG
- name: CSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODEGETINFO_WATCH_TIMEOUT_MINUTES
value: "1"
volumeMounts:
- name: plugin-dir
mountPath: 'C:\csi'
- name: pods-mount-dir
mountPath: 'C:\var\lib\kubelet'
- name: csi-proxy-volume-v1
mountPath: \\.\pipe\csi-proxy-volume-v1
- name: csi-proxy-filesystem-v1
mountPath: \\.\pipe\csi-proxy-filesystem-v1
- name: csi-proxy-disk-v1
mountPath: \\.\pipe\csi-proxy-disk-v1
- name: csi-proxy-system-v1alpha1
mountPath: \\.\pipe\csi-proxy-system-v1alpha1
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 5
periodSeconds: 5
failureThreshold: 3
- name: liveness-probe
image: "{{ template "system_default_registry" . }}{{ .Values.csiNode.image.livenessProbe.repository }}:{{ .Values.csiNode.image.livenessProbe.tag }}"
args:
- "--v=4"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: registration-dir
hostPath:
path: 'C:\var\lib\kubelet\plugins_registry\'
type: Directory
- name: plugin-dir
hostPath:
path: 'C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\'
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: \var\lib\kubelet
type: Directory
- name: csi-proxy-disk-v1
hostPath:
path: \\.\pipe\csi-proxy-disk-v1
type: ''
- name: csi-proxy-volume-v1
hostPath:
path: \\.\pipe\csi-proxy-volume-v1
type: ''
- name: csi-proxy-filesystem-v1
hostPath:
path: \\.\pipe\csi-proxy-filesystem-v1
type: ''
- name: csi-proxy-system-v1alpha1
hostPath:
path: \\.\pipe\csi-proxy-system-v1alpha1
type: ''
{{ end }}

View File

@ -0,0 +1,9 @@
{{- if .Values.vCenter.configSecret.generate -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.vCenter.configSecret.name }}
namespace: {{ .Release.Namespace }}
data:
csi-vsphere.conf: {{ tpl .Values.vCenter.configSecret.configTemplate . | b64enc | quote }}
{{- end -}}

View File

@ -0,0 +1,17 @@
{{- if .Values.storageClass.enabled -}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .Values.storageClass.name | quote }}
annotations:
storageclass.kubernetes.io/is-default-class: {{ .Values.storageClass.isDefault | quote }}
provisioner: csi.vsphere.vmware.com
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
parameters:
{{- if .Values.storageClass.datastoreURL }}
datastoreURL: {{ .Values.storageClass.datastoreURL | quote }}
{{- end }}
{{- if .Values.storageClass.storagePolicyName }}
storagepolicyname: {{ .Values.storageClass.storagePolicyName | quote }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,176 @@
vCenter:
host: ""
port: 443
insecureFlag: "1"
clusterId: ""
datacenters: ""
username: ""
password: ""
configSecret:
name: "vsphere-config-secret"
generate: true
configTemplate: |
[Global]
cluster-id = {{ required ".Values.vCenter.clusterId must be provided" (default .Values.vCenter.clusterId .Values.global.cattle.clusterId) | quote }}
user = {{ .Values.vCenter.username | quote }}
password = {{ .Values.vCenter.password | quote }}
port = {{ .Values.vCenter.port | quote }}
insecure-flag = {{ .Values.vCenter.insecureFlag | quote }}
[VirtualCenter {{ .Values.vCenter.host | quote }}]
datacenters = {{ .Values.vCenter.datacenters | quote }}
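# For illustration, with hypothetical values (clusterId "c-abc123", username
# "admin@vsphere.local", host "vc.example.com", datacenters "dc-1"), the
# template above renders a csi-vsphere.conf along these lines:
#   [Global]
#   cluster-id = "c-abc123"
#   user = "admin@vsphere.local"
#   password = "<redacted>"
#   port = "443"
#   insecure-flag = "1"
#   [VirtualCenter "vc.example.com"]
#   datacenters = "dc-1"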
csiController:
csiResizer:
enabled: false
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.5.1
csiAttacher:
repository: rancher/mirrored-sig-storage-csi-attacher
tag: v3.4.0
csiResizer:
repository: rancher/mirrored-sig-storage-csi-resizer
tag: v1.4.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.6.0
vsphereSyncer:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-syncer
tag: v2.5.1
csiProvisioner:
repository: rancher/mirrored-sig-storage-csi-provisioner
tag: v3.1.0
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
# Uncomment the tolerations below if you need aggressive pod eviction when a
# node becomes not-ready or unreachable. The default is 300 seconds if not specified.
tolerations: []
# - key: node.kubernetes.io/not-ready
# operator: Exists
# effect: NoExecute
# tolerationSeconds: 30
# - key: node.kubernetes.io/unreachable
# operator: Exists
# effect: NoExecute
# tolerationSeconds: 30
# Internal features
csiMigration:
enabled: false
csiAuthCheck:
enabled: false
onlineVolumeExtend:
enabled: false
triggerCsiFullsync:
enabled: false
asyncQueryVolume:
enabled: false
improvedCsiIdempotency:
enabled: false
improvedVolumeTopology:
enabled: false
csiWindowsSupport:
enabled: false
useCsinodeId:
enabled: true
pvToBackingdiskobjectidMapping:
enabled: false
cnsmgrSuspendCreateVolume:
enabled: false
csiNode:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
prefixPath: ""
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.5.1
nodeDriverRegistrar:
repository: rancher/mirrored-sig-storage-csi-node-driver-registrar
tag: v2.5.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.6.0
storageClass:
enabled: true
allowVolumeExpansion: false
name: "vsphere-csi-sc"
isDefault: true
storagePolicyName: ""
datastoreURL: ""
global:
cattle:
systemDefaultRegistry: ""
versionOverrides:
- constraint: ">= 1.21 < 1.24"
values:
csiController:
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.5.1
csiAttacher:
repository: rancher/mirrored-sig-storage-csi-attacher
tag: v3.4.0
csiResizer:
repository: rancher/mirrored-sig-storage-csi-resizer
tag: v1.4.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.6.0
vsphereSyncer:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-syncer
tag: v2.5.1
csiProvisioner:
repository: rancher/mirrored-sig-storage-csi-provisioner
tag: v3.1.0
csiNode:
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.5.1
nodeDriverRegistrar:
repository: rancher/mirrored-sig-storage-csi-node-driver-registrar
tag: v2.5.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.6.0
- constraint: "~ 1.20"
values:
csiController:
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.4.1
csiAttacher:
repository: rancher/mirrored-sig-storage-csi-attacher
tag: v3.3.0
csiResizer:
repository: rancher/mirrored-sig-storage-csi-resizer
tag: v1.3.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.4.0
vsphereSyncer:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-syncer
tag: v2.4.1
csiProvisioner:
repository: rancher/mirrored-sig-storage-csi-provisioner
tag: v3.0.0
csiNode:
image:
repository: rancher/mirrored-cloud-provider-vsphere-csi-release-driver
tag: v2.4.1
nodeDriverRegistrar:
repository: rancher/mirrored-sig-storage-csi-node-driver-registrar
tag: v2.3.0
livenessProbe:
repository: rancher/mirrored-sig-storage-livenessprobe
tag: v2.4.0

View File

@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-windows-gmsa-system
catalog.cattle.io/release-name: rancher-windows-gmsa-crd
apiVersion: v1
description: Installs the CRDs for Windows GMSA.
name: rancher-windows-gmsa-crd
type: application
version: 2.0.0

View File

@ -0,0 +1,119 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: gmsacredentialspecs.windows.k8s.io
annotations:
"api-approved.kubernetes.io": "https://github.com/kubernetes/enhancements/tree/master/keps/sig-windows/689-windows-gmsa"
spec:
group: windows.k8s.io
versions:
- name: v1alpha1
served: true
storage: false
deprecated: true
schema:
openAPIV3Schema:
type: object
properties:
credspec:
description: GMSA Credential Spec
type: object
properties:
ActiveDirectoryConfig:
type: object
properties:
GroupManagedServiceAccounts:
type: array
items:
type: object
properties:
Name:
type: string
Scope:
type: string
HostAccountConfig:
type: object
properties:
PluginGUID:
type: string
PluginInput:
type: string
PortableCcgVersion:
type: string
CmsPlugins:
type: array
items:
type: string
DomainJoinConfig:
type: object
properties:
DnsName:
type: string
DnsTreeName:
type: string
Guid:
type: string
MachineAccountName:
type: string
NetBiosName:
type: string
Sid:
type: string
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
credspec:
description: GMSA Credential Spec
type: object
properties:
ActiveDirectoryConfig:
type: object
properties:
GroupManagedServiceAccounts:
type: array
items:
type: object
properties:
Name:
type: string
Scope:
type: string
HostAccountConfig:
type: object
properties:
PluginGUID:
type: string
PluginInput:
type: string
PortableCcgVersion:
type: string
CmsPlugins:
type: array
items:
type: string
DomainJoinConfig:
type: object
properties:
DnsName:
type: string
DnsTreeName:
type: string
Guid:
type: string
MachineAccountName:
type: string
NetBiosName:
type: string
Sid:
type: string
conversion:
strategy: None
names:
kind: GMSACredentialSpec
plural: gmsacredentialspecs
scope: Cluster

View File

@ -0,0 +1,29 @@
annotations:
catalog.cattle.io/auto-install: rancher-windows-gmsa-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Windows GMSA
catalog.cattle.io/experimental: "true"
catalog.cattle.io/kube-version: '>= 1.21.0-0 < 1.24.0-0'
catalog.cattle.io/namespace: cattle-windows-gmsa-system
catalog.cattle.io/os: windows
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: windows.k8s.io.gmsacredentialspecs/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-windows-gmsa
apiVersion: v2
appVersion: 0.3.0
description: Windows GMSA Configuration
icon: https://charts.rancher.io/assets/logos/windows-gmsa.svg
keywords:
- Windows
- Windows GMSA
- GMSA
- Active Directory
maintainers:
- email: jamie.phillips@suse.com
name: Rancher
name: rancher-windows-gmsa
sources:
- https://github.com/kubernetes-sigs/windows-gmsa
type: application
version: 2.0.0

View File

@ -0,0 +1,9 @@
# Windows GMSA Admission Webhook
This chart creates the GMSA CRD, Credential, and Admission Webhook. The official documentation and tutorials can be found [here](https://github.com/kubernetes-sigs/windows-gmsa).
## Prerequisites
- Active Directory that supports Group Managed Service Accounts
- A Group Managed Service Account
- Kubernetes v1.21+
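For orientation, here is a minimal sketch of the values this chart consumes when generating a GMSA Credential; the field names come from this chart's values.yaml, while the domain values themselves are hypothetical placeholders:
```yaml
credential:
  enabled: true
  domainJoinConfig:
    machineAccountName: gmsa-web01                        # hypothetical GMSA account name
    guid: 244818ae-87ac-4fcd-92ec-e79e5252348a            # hypothetical GUID
    sid: S-1-5-21-1111111111-2222222222-3333333333-4444   # hypothetical SID
    dnsName: contoso.example                              # hypothetical DNS domain name
    dnsTreeName: contoso.example                          # hypothetical DNS tree root
    netBiosName: CONTOSO                                  # hypothetical NETBIOS name
```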

View File

@ -0,0 +1,53 @@
questions:
- variable: credential.enabled
default: true
description: Whether to create a GMSA Credential when installing GMSA Webhook
label: Whether to create a GMSA Credential
type: boolean
group: "Credential Spec"
show_subquestion_if: true
subquestions:
- variable: credential.domainJoinConfig.machineAccountName
label: GMSA Account Name
description: Username of the GMSA account
type: string
required: true
- variable: credential.domainJoinConfig.guid
label: GUID
description: GUID of the Service Account
type: string
required: true
- variable: credential.domainJoinConfig.sid
label: SID
description: SID of the GMSA Account
type: string
required: true
- variable: credential.domainJoinConfig.dnsName
label: DNS Domain Name
description: Name of the domain in DNS
type: string
required: true
- variable: credential.domainJoinConfig.dnsTreeName
label: DNS Tree Domain
description: Root name of the domain in DNS
type: string
required: true
- variable: credential.domainJoinConfig.netBiosName
label: NETBIOS Name
description: NETBIOS Name for the domain.
type: string
required: true
- variable: certificates.certManager.enabled
default: true
description: Use cert-manager to generate certificates for the webhook
label: Generate certificate through cert-manager
type: boolean
group: "Certificates"
show_subquestion_if: false
subquestions:
- variable: certificates.secretName
default: webhook-server-cert
description: Mount a CA Bundle from an existing Secret in the same namespace as the GMSA webhook. The Secret must contain the CA certificate (ca.crt), the TLS certificate (tls.crt), and the TLS private key (tls.key) to be used by the webhook.
label: CA Bundle From Existing Secret
type: string
required: true

View File

@ -0,0 +1,48 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
{{/* Create chart name and version as used by the chart label. */}}
{{- define "gmsa.chartref" -}}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
{{- end }}
{{/* Determine apiVersion for cert-manager */}}
{{- define "cert-manager.apiversion" -}}
{{- $certmanagerVer := split "." .Values.certificates.certManager.version -}}
{{- if or (.Capabilities.APIVersions.Has "cert-manager.io/v1") (and (gt (len $certmanagerVer._0) 0) (eq (int $certmanagerVer._0) 1) (ge (int $certmanagerVer._1) 0)) }}
apiVersion: cert-manager.io/v1
{{- else if or (.Capabilities.APIVersions.Has "cert-manager.io/v1beta1") (and (gt (len $certmanagerVer._0) 0) (eq (int $certmanagerVer._0) 0) (ge (int $certmanagerVer._1) 16)) }}
apiVersion: cert-manager.io/v1beta1
{{- else if or (.Capabilities.APIVersions.Has "cert-manager.io/v1alpha2") (and (gt (len $certmanagerVer._0) 0) (eq (int $certmanagerVer._0) 0) (ge (int $certmanagerVer._1) 11)) }}
apiVersion: cert-manager.io/v1alpha2
{{- else if or (.Capabilities.APIVersions.Has "certmanager.k8s.io/v1alpha1") (and (gt (len $certmanagerVer._0) 0) (eq (int $certmanagerVer._0) 0) (lt (int $certmanagerVer._1) 11)) }}
apiVersion: certmanager.k8s.io/v1alpha1
{{- else }}
apiVersion: cert-manager.io/v1
{{- end }}
{{- end }}
{{- define "certificates.cabundle"}}
{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
{{- $secret := (lookup "v1" "Secret" .Release.Namespace .Values.certificates.secretName) -}}
{{- if lt (len $secret) 1 -}}
{{- required (printf "CA Bundle secret '%s' in namespace '%s' must exist" .Values.certificates.secretName .Release.Namespace) "" -}}
{{- else -}}
{{- if not (hasKey $secret "data") -}}
{{- required (printf "CA Bundle secret '%s' in namespace '%s' is empty" .Values.certificates.secretName .Release.Namespace) "" -}}
{{- end -}}
{{- if or (not (hasKey $secret.data "ca.crt")) (not (hasKey $secret.data "tls.crt")) (not (hasKey $secret.data "tls.key")) -}}
{{- required (printf "CA Bundle secret '%s' in namespace '%s' must contain ca.crt, tls.key, and tls.cert; found the following keys in the secret: %s" .Values.certificates.secretName .Release.Namespace $secret.data) "" -}}
{{- end -}}
{{- end -}}
{{- get $secret.data "ca.crt" }}
{{- else -}}
INSERT_CERTIFICATE_FROM_SECRET
{{- end -}}
{{- end }}

View File

@ -0,0 +1,16 @@
# the RBAC role that the webhook needs to:
# * read GMSA custom resources
# * check authorizations to use GMSA cred specs
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
rules:
- apiGroups: ["windows.k8s.io"]
resources: ["gmsacredentialspecs"]
verbs: ["get", "use"]
- apiGroups: ["authorization.k8s.io"]
resources: ["localsubjectaccessreviews"]
verbs: ["create"]

View File

@ -0,0 +1,15 @@
# bind that role to the webhook's service account
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Release.Name }}
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,24 @@
{{- if .Values.credential.enabled -}}
apiVersion: windows.k8s.io/v1
kind: GMSACredentialSpec
metadata:
name: {{ .Values.credential.domainJoinConfig.machineAccountName | lower }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
credspec:
ActiveDirectoryConfig:
GroupManagedServiceAccounts:
- Name: {{ .Values.credential.domainJoinConfig.machineAccountName }}
Scope: {{ .Values.credential.domainJoinConfig.netBiosName }}
- Name: {{ .Values.credential.domainJoinConfig.machineAccountName }}
Scope: {{ .Values.credential.domainJoinConfig.dnsName }}
CmsPlugins:
- ActiveDirectory
DomainJoinConfig:
DnsName: {{ .Values.credential.domainJoinConfig.dnsName }}
DnsTreeName: {{ .Values.credential.domainJoinConfig.dnsName }}
Guid: {{ .Values.credential.domainJoinConfig.guid }}
MachineAccountName: {{ .Values.credential.domainJoinConfig.machineAccountName }}
NetBiosName: {{ .Values.credential.domainJoinConfig.netBiosName }}
Sid: {{ .Values.credential.domainJoinConfig.sid }}
{{- end -}}

View File

@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
seccomp.security.alpha.kubernetes.io/pod: runtime/default
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ .Release.Name }}
spec:
{{- if .Values.podSecurityContext }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
serviceAccountName: {{ .Release.Name }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Release.Name }}
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.imagePullPolicy }}
readinessProbe:
httpGet:
scheme: HTTPS
path: /health
port: 443
ports:
- containerPort: 443
{{- if .Values.securityContext }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
{{- end }}
volumeMounts:
- name: tls
mountPath: "/etc/ssl/rancher-windows-gmsa-webhook"
readOnly: true
env:
- name: TLS_KEY
value: /etc/ssl/rancher-windows-gmsa-webhook/tls.key
- name: TLS_CRT
value: /etc/ssl/rancher-windows-gmsa-webhook/tls.crt
volumes:
- name: tls
secret:
secretName: {{ .Values.certificates.secretName }}
items:
- key: tls.key
path: tls.key
- key: tls.crt
path: tls.crt

View File

@ -0,0 +1,26 @@
{{- if .Values.certificates.certManager.enabled -}}
{{ template "cert-manager.apiversion" . }}
kind: Certificate
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
spec:
dnsNames:
- {{ .Release.Name }}.{{ .Release.Namespace }}.svc
- {{ .Release.Name }}.{{ .Release.Namespace }}.svc.cluster.local
issuerRef:
kind: Issuer
name: {{ .Release.Name }}
secretName: {{ .Values.certificates.secretName }}
---
{{ template "cert-manager.apiversion" . }}
kind: Issuer
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
spec:
selfSigned: {}
{{- end -}}

View File

@ -0,0 +1,34 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ .Release.Name }}
{{- if .Values.certificates.certManager.enabled }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ .Release.Name }}
{{- end }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
webhooks:
- name: admission-webhook.windows-gmsa.sigs.k8s.io
clientConfig:
service:
name: {{ .Release.Name }}
namespace: {{.Release.Namespace}}
path: "/mutate"
{{- if not (.Values.certificates.certManager.enabled) }}
caBundle: {{ template "certificates.cabundle" . }}
{{- end }}
rules:
- operations: ["CREATE"]
apiGroups: [""]
apiVersions: ["*"]
resources: ["pods"]
failurePolicy: Fail
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
# don't run on namespaces labeled with gmsa-webhook: disabled
namespaceSelector:
matchExpressions:
- key: gmsa-webhook
operator: NotIn
values: [disabled]

View File

@ -0,0 +1,16 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
spec:
ports:
- port: 443
targetPort: 443
selector:
app: {{ .Release.Name }}

View File

@ -0,0 +1,8 @@
# the service account for the webhook
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}

View File

@ -0,0 +1,14 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "windows.k8s.io/v1alpha1/GMSACredentialSpec" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,34 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: {{ .Release.Name }}
{{- if .Values.certificates.certManager.enabled }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ .Release.Name }}
{{- end }}
labels: {{ include "gmsa.chartref" . | nindent 4 }}
webhooks:
- name: admission-webhook.windows-gmsa.sigs.k8s.io
clientConfig:
service:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
path: "/validate"
{{- if not (.Values.certificates.certManager.enabled) }}
caBundle: {{ template "certificates.cabundle" . }}
{{- end }}
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: [""]
apiVersions: ["*"]
resources: ["pods"]
failurePolicy: Fail
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
# don't run on namespaces labeled with gmsa-webhook: disabled
namespaceSelector:
matchExpressions:
- key: gmsa-webhook
operator: NotIn
values: [disabled]

View File

@ -0,0 +1,42 @@
certificates:
certManager:
# Enable cert-manager integration. cert-manager must already be installed in the cluster.
enabled: true
version: ""
# If cert-manager integration is disabled, provide the certificate data (ca.crt, tls.crt, and tls.key) in a Secret with this name in the release namespace.
secretName: gmsa-server-cert
credential:
enabled: true
domainJoinConfig:
dnsName: "" #DNS Domain Name
dnsTreeName: "" #DNS Domain Name Root
guid: "" #GUID
machineAccountName: "" #Username of the GMSA account
netBiosName: "" #NETBIOS Domain Name
sid: "" #SID of GMSA
image:
repository: rancher/mirrored-sigwindowstools-k8s-gmsa-webhook
tag: v0.3.0
imagePullPolicy: IfNotPresent
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.22.6
pullPolicy: IfNotPresent
## SecurityContext holds pod-level security attributes and common container settings.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
podSecurityContext:
runAsNonRoot: false
# Currently, required to run as root due to port binding within the container.
runAsUser: 0
securityContext: {}
tolerations: []

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,18 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.22.0-0'
catalog.cattle.io/namespace: cattle-wins-system
catalog.cattle.io/os: windows
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-wins-upgrader
apiVersion: v2
appVersion: 0.1.1
description: Manages upgrading the wins server version and configuration across all
of your Windows nodes
maintainers:
- email: arvind.iyengar@suse.com
name: aiyengar2
name: rancher-wins-upgrader
type: application
version: 101.0.0+up0.0.1

View File

@ -0,0 +1,41 @@
# Rancher Wins Upgrader
A Rancher chart that handles keeping the wins server version and config across some (or all) of the Windows nodes on a Kubernetes cluster in sync. It does this by running a simple script to replace the contents of the `\etc\rancher\wins` directory with the newly specified config and wins image via one or more DaemonSets. Once executed, the script will simply sleep forever.
## How does this work?
A DaemonSet of initContainers copies the provided config (stored in a ConfigMap) into `\etc\rancher\wins\config` and runs `wins cli prc run --path {path-to-wins} --args {up}`, where `.\wins up[grade]` is a Go program that runs a simple PowerShell script that forces an upgrade of the binary used by the `rancher-wins` service across all of your Windows hosts.
TLDR: we use wins (cli) to pass wins (upgrade) to wins (server) in order to update wins (server) on the host on demand.
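Conceptually, the DaemonSet pod spec boils down to something like the sketch below. This is illustrative only, not the chart's actual template; the container names, image tag, and script paths are hypothetical, while `\\.\pipe\rancher_wins` is the named pipe the wins client uses to reach the wins server:
```yaml
spec:
  initContainers:
    - name: wins-upgrade                       # hypothetical name
      image: rancher/wins:v0.1.1               # hypothetical image/tag
      command: ["powershell", "-File", "c:\\scripts\\upgrade.ps1"]   # the upgrade script shown later in this diff
      volumeMounts:
        - name: scripts                        # ConfigMap holding the config and scripts
          mountPath: c:\scripts
        - name: wins-pipe                      # named pipe to the host's wins server
          mountPath: \\.\pipe\rancher_wins
  containers:
    - name: sleep                              # keeps the pod alive (a DaemonSet's restartPolicy must be Always)
      image: rancher/wins:v0.1.1               # hypothetical image/tag
      command: ["powershell", "-File", "c:\\scripts\\noop.ps1"]      # the sleep-forever script shown later in this diff
```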
## Cluster / Node Requirements
This Helm chart is intended to be used on a Windows cluster that meets the following two requirements:
- A Windows Service called `rancher-wins` is currently running on each Windows host (e.g. `.\wins srv app run --register; Start-Service -Name rancher-wins` or `.\wins up` has been run on the host) that is running a wins server version of v0.1.0+.
- The wins config used by each Windows host's `rancher-wins` Service has `{{ .Values.prefixPath }}etc\rancher\wins\wins-upgrade.exe` within `whiteList.processPath` so that the new wins version can be delivered onto the host
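For reference, the second requirement amounts to an entry like the following in each host's wins config. This is a sketch using the `whiteList.processPath` key referenced above; verify the exact schema against your wins version, and substitute your prefix path for the default `c:\`:
```yaml
whiteList:
  processPath:
    - c:\etc\rancher\wins\wins-upgrade.exe
```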
If the cluster you are installing this chart on is a custom cluster that was created via RKE1 with Windows Support enabled, your nodes should already meet the first requirement; this should have been added as part of [the bootstrapping process for adding the Windows node onto your RKE1 cluster](https://github.com/rancher/rancher/blob/master/package/windows/bootstrap.ps1).
However, depending on the bootstrap.ps1 version that was used when you spun up your Windows cluster, it is possible that the second requirement is not met yet.
If the second requirement is not met, there are two options to reconcile it:
### Manual Update
This is the recommended approach for updating your Windows hosts, but it requires logging onto every Windows host and manually updating the wins config.
By default, the wins config is located at `c:\etc\rancher\wins\config`, but you can use the following PowerShell command to identify the command-line arguments passed to the `rancher-wins` service (`--config` corresponds to the config path on the host):
```powershell
(Get-CimInstance Win32_Service -Filter 'Name = "rancher-wins"').PathName
```
Once complete, restart the service:
```powershell
Restart-Service -Name "rancher-wins"
```
### Masquerading (Use at your own risk. Here be dragons...)
This option is *only* meant as a hack to allow users who are currently operating on Windows clusters that have not whitelisted `{{ .Values.prefixPath }}etc\rancher\wins\wins-upgrade.exe`. If you plan to use this option, please ensure that you immediately upgrade this chart with `masquerade.enabled=false` and perform another `helm upgrade` to avoid any unintentional consequences (e.g. failure to install the original process that you meant to whitelist on the host).
If `masquerade.enabled=true`, this chart will have the wins client execute the `wins-upgrade.exe` payload under the `masquerade.as` path provided, effectively tricking the wins server into running the binary even though it has not been whitelisted. This relies on the fact that the wins server does not (and cannot) do any verification on the binary passed into it, since it does not track a list of valid checksums for the binaries provided to it.
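A sketch of the values involved; the `as` path is a hypothetical example of a binary that is already present in the host's `whiteList.processPath`:
```yaml
masquerade:
  enabled: true
  as: c:\etc\kube-proxy\kube-proxy.exe   # hypothetical already-whitelisted path
```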

View File

@ -0,0 +1,19 @@
# Rancher Wins Upgrader
A Rancher chart that handles keeping the wins server version and config across some (or all) of the Windows nodes on a Kubernetes cluster in sync. It does this by running a simple script to replace the contents of the `\etc\rancher\wins` directory with the newly specified config and wins image via one or more DaemonSets. Once executed, the script will simply sleep forever.
## How does this work?
A DaemonSet of initContainers copies the provided config (stored in a ConfigMap) into `\etc\rancher\wins\config` and runs `wins cli prc run --path {path-to-wins} --args {up}`, where `.\wins up[grade]` is a Go program that runs a simple PowerShell script that forces an upgrade of the binary used by the `rancher-wins` service across all of your Windows hosts.
TLDR: we use wins (cli) to pass wins (upgrade) to wins (server) in order to update wins (server) on the host on demand.
## Cluster / Node Requirements
This Helm chart is intended to be used on a Windows cluster that meets the following two requirements:
- A Windows Service called `rancher-wins` is currently running on each Windows host (e.g. `.\wins srv app run --register; Start-Service -Name rancher-wins` or `.\wins up` has been run on the host) that is running a wins server version of v0.1.0+.
- The wins config used by each Windows host's `rancher-wins` Service has `{{ .Values.prefixPath }}etc\rancher\wins\wins-upgrade.exe` within `whiteList.processPath` so that the new wins version can be delivered onto the host
If the cluster you are installing this chart on is a custom cluster that was created via RKE1 with Windows Support enabled after wins v0.1.0+ was released (i.e. Rancher 2.5.7+), your nodes should already meet the first requirement; this should have been added as part of [the bootstrapping process for adding the Windows node onto your RKE1 cluster](https://github.com/rancher/rancher/blob/master/package/windows/bootstrap.ps1).
If not, please see the README.md for more information on how you can use this chart.

View File

@ -0,0 +1,4 @@
$ErrorActionPreference = 'Stop'
# Sleep forever, since a DaemonSet's restartPolicy must be Always
while(1) { Start-Sleep -s 3600 }

View File

@ -0,0 +1,72 @@
$ErrorActionPreference = 'Stop'
function Create-Directory
{
param (
[parameter(Mandatory = $false, ValueFromPipeline = $true)] [string]$Path
)
if (Test-Path -Path $Path) {
if (-not (Test-Path -Path $Path -PathType Container)) {
# clean the same path file
Remove-Item -Recurse -Force -Path $Path -ErrorAction Ignore | Out-Null
}
return
}
New-Item -Force -ItemType Directory -Path $Path | Out-Null
}
function Transfer-File
{
param (
[parameter(Mandatory = $true)] [string]$Src,
[parameter(Mandatory = $true)] [string]$Dst
)
if (Test-Path -PathType leaf -Path $Dst) {
$dstHasher = Get-FileHash -Path $Dst
$srcHasher = Get-FileHash -Path $Src
if ($dstHasher.Hash -eq $srcHasher.Hash) {
return
}
}
$null = Copy-Item -Force -Path $Src -Destination $Dst
}
$prefixPath = 'c:\'
if ($env:CATTLE_PREFIX_PATH) {
$prefixPath = $env:CATTLE_PREFIX_PATH
}
$winsUpgradePath = $('{0}etc\rancher\wins\wins-upgrade.exe' -f $prefixPath)
if ($env:WINS_UPGRADE_PATH) {
$winsUpgradePath = $env:WINS_UPGRADE_PATH
}
$winsUpgradeDir = Split-Path -Path $winsUpgradePath
$winsUpgradeFilename = Split-Path -Path $winsUpgradePath -Leaf
Create-Directory -Path $winsUpgradeDir
Transfer-File -Src "c:\Windows\wins.exe" -Dst $winsUpgradePath
Create-Directory -Path "c:\host\etc\rancher\wins"
Transfer-File -Src $winsUpgradePath -Dst "c:\host\etc\rancher\wins\$winsUpgradeFilename"
Transfer-File -Src "c:\scripts\config" -Dst "c:\host\etc\rancher\wins\config"
$winsOut = wins.exe cli prc run --path=$winsUpgradePath --args="up --wins-args=`'--config=$winsUpgradeDir\config`'"
Write-Host $winsOut
if ($winsOut -match ".* rpc error: code = Unavailable desc = transport is closing") {
Write-Host "Successfully upgraded"
exit 0
} elseif ($LastExitCode -ne 0) {
Write-Host "Returned exit $LastExitCode"
exit $LastExitCode
} else {
Write-Host "Returned exit 0, but did not receive expected output from .\wins up"
exit 1
}

View File

@ -0,0 +1,63 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# General
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
The components in this chart create additional resources that expand the longest created name strings.
The longest name that gets created adds an extra 37 characters, so truncation should be 63-37=26.
*/}}
{{- define "winsUpgrader.name" -}}
wins-upgrader
{{- end -}}
{{- define "winsUpgrader.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride -}}
{{- end -}}
{{- define "winsUpgrader.labels" -}}
k8s-app: {{ template "winsUpgrader.name" . }}
release: {{ .Release.Name }}
provider: kubernetes
{{- end -}}
{{- define "winsUpgrader.validatePathPrefix" -}}
{{- if .Values.global.cattle.rkeWindowsPathPrefix -}}
{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}}
{{- if (not (hasSuffix "\\" $prefixPath)) -}}
{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "winsUpgrader.winsHostPath" -}}
{{ default "c:\\" .Values.global.cattle.rkeWindowsPathPrefix | replace "\\\\" "\\" | replace "\\" "/" }}etc/rancher/wins
{{- end -}}
{{- define "winsUpgrader.winsMasqueradePath" -}}
{{ tpl .Values.masquerade.as . | required "Must provide name for .Values.masquerade.as if enabled" | replace "\\\\" "\\" | replace "\\" "/" }}
{{- end -}}
{{- define "winsUpgrader.winsMasqueradeHostPath" -}}
{{ include "winsUpgrader.winsMasqueradePath" . | dir }}
{{- end -}}
{{- define "winsUpgrader.nodeSelector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: windows
{{- else -}}
kubernetes.io/os: windows
{{- end -}}
{{- end -}}
{{- define "winsUpgrader.tolerations" -}}
- operator: Exists
{{- end -}}

Some files were not shown because too many files have changed in this diff