mirror of https://git.rancher.io/charts
release feature charts v2.5.6
parent 999fd4788a
commit 9d6c294edb
Binary file not shown.
@@ -0,0 +1,12 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: fleet-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.3.4
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 0.3.400
@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
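For orientation, this helper only prepends a registry prefix (with a trailing slash) when global.cattle.systemDefaultRegistry is set; otherwise it renders nothing. A minimal sketch of how an image reference renders, assuming a hypothetical override of registry.example.com:

# hypothetical values override
global:
  cattle:
    systemDefaultRegistry: "registry.example.com"

# template input:   image: '{{ template "system_default_registry" . }}rancher/fleet-agent:v0.3.4'
# rendered output:  image: 'registry.example.com/rancher/fleet-agent:v0.3.4'
# with the default empty registry, the reference stays: 'rancher/fleet-agent:v0.3.4'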
@@ -0,0 +1,12 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: fleet-agent
data:
  config: |-
    {
      {{ if .Values.labels }}
      "labels":{{toJson .Values.labels}},
      {{ end }}
      "clientID":"{{.Values.clientID}}"
    }
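As a worked example (hypothetical values, not part of the chart): labels of {env: dev} and a clientID of my-cluster would render the data.config entry roughly as:

# rendered ConfigMap data (sketch)
config: |-
  {
    "labels":{"env":"dev"},
    "clientID":"my-cluster"
  }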
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: fleet-agent
spec:
  selector:
    matchLabels:
      app: fleet-agent
  template:
    metadata:
      labels:
        app: fleet-agent
    spec:
      containers:
      - env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
        name: fleet-agent
      serviceAccountName: fleet-agent
      {{- with .Values.fleetAgent.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.fleetAgent.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-allow-all
  namespace: {{ .Values.internal.systemNamespace }}
spec:
  podSelector: {}
  ingress:
  - {}
  egress:
  - {}
  policyTypes:
  - Ingress
  - Egress
@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: patch-fleet-sa
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
  template:
    spec:
      serviceAccountName: fleet-agent
      restartPolicy: Never
      containers:
      - name: sa
        image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
        imagePullPolicy: {{ .Values.global.imagePullPolicy }}
        command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
        args: ["-n", {{ .Values.internal.systemNamespace }}]
      {{- with .Values.kubectl.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.kubectl.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
  backoffLimit: 1
@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fleet-agent-system-fleet-agent-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
  name: fleet-agent
  namespace: {{.Release.Namespace}}
@@ -0,0 +1,10 @@
apiVersion: v1
data:
  systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
  clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
  token: "{{b64enc .Values.token}}"
  apiServerURL: "{{b64enc .Values.apiServerURL}}"
  apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
  name: fleet-agent-bootstrap
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fleet-agent
@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}

{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.managedReleaseName) }}
{{end}}

{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}
@@ -0,0 +1,52 @@
image:
  repository: rancher/fleet-agent
  tag: v0.3.4

# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""

# The PEM-encoded value of the CA of the Kubernetes API server running the Fleet Manager.
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""

# The cluster registration value
token: ""

# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
#  foo: bar

# The client ID of the cluster to associate with
clientID: ""

# The namespace of the cluster we are registering with
clusterNamespace: ""

# The namespace containing the clusters registration secrets
systemRegistrationNamespace: fleet-clusters-system

# Please do not change the below setting unless you really know what you are doing
internal:
  systemNamespace: fleet-system
  managedReleaseName: fleet-agent

# The nodeSelector and tolerations for the agent deployment
fleetAgent:
  nodeSelector: {}
  tolerations: []
kubectl:
  nodeSelector:
    kubernetes.io/os: linux
  tolerations:
  - key: cattle.io/os
    operator: "Equal"
    value: "linux"
    effect: NoSchedule

global:
  cattle:
    systemDefaultRegistry: ""
  kubectl:
    repository: rancher/kubectl
    tag: v1.18.6
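A minimal sketch of an override file for these values, using placeholder registration data (the URL, token and namespace below are hypothetical; in practice they come from the Fleet Manager's cluster registration output):

# hypothetical fleet-agent values override
apiServerURL: "https://fleet.example.com:6443"
apiServerCA: ""                 # empty: assume a well-known CA signed the API server cert
token: "<registration-token>"   # placeholder, supplied by the Fleet Manager
clusterNamespace: "cluster-abc123"
labels:
  env: dev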
@@ -0,0 +1,12 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: fleet-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.3.4
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 0.3.400
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,15 @@
annotations:
  catalog.cattle.io/auto-install: fleet-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/experimental: "true"
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: fleet-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
  catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.3.4
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 0.3.400
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,5 @@
apiVersion: v2
appVersion: 0.1.13
description: Controller that runs jobs based on git events
name: gitjob
version: 0.1.13
@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
@@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gitjob
rules:
  - apiGroups:
      - "batch"
    resources:
      - 'jobs'
    verbs:
      - '*'
  - apiGroups:
      - ""
    resources:
      - 'pods'
    verbs:
      - 'list'
      - 'get'
      - 'watch'
  - apiGroups:
      - ""
    resources:
      - 'secrets'
    verbs:
      - '*'
  - apiGroups:
      - ""
    resources:
      - 'configmaps'
    verbs:
      - '*'
  - apiGroups:
      - "gitjob.cattle.io"
    resources:
      - "gitjobs"
      - "gitjobs/status"
    verbs:
      - "*"
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitjob-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gitjob
subjects:
  - kind: ServiceAccount
    name: gitjob
    namespace: {{ .Release.Namespace }}
@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitjob
spec:
  selector:
    matchLabels:
      app: "gitjob"
  template:
    metadata:
      labels:
        app: "gitjob"
    spec:
      serviceAccountName: gitjob
      containers:
        - image: "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
          name: gitjob
          command:
            - gitjob
            - --tekton-image
            - "{{ template "system_default_registry" . }}{{ .Values.tekton.repository }}:{{ .Values.tekton.tag }}"
          env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            {{- if .Values.proxy }}
            - name: HTTP_PROXY
              value: {{ .Values.proxy }}
            - name: HTTPS_PROXY
              value: {{ .Values.proxy }}
            - name: NO_PROXY
              value: {{ .Values.noProxy }}
            {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: gitjob
spec:
  ports:
    - name: http-80
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: "gitjob"
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitjob
@@ -0,0 +1,26 @@
gitjob:
  repository: rancher/gitjob
  tag: v0.1.13

tekton:
  repository: rancher/tekton-utils
  tag: v0.1.1

global:
  cattle:
    systemDefaultRegistry: ""

# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>

# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local

nodeSelector:
  kubernetes.io/os: linux

tolerations:
- key: cattle.io/os
  operator: "Equal"
  value: "linux"
  effect: NoSchedule
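A minimal sketch of an override enabling the proxy settings documented above (the proxy host and credentials are placeholders):

# hypothetical gitjob values override
proxy: "http://user:password@proxy.example.com:3128"
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local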
@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
@@ -0,0 +1,23 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fleet-controller
data:
  config: |
    {
      "agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
      "agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
      "apiServerURL": "{{.Values.apiServerURL}}",
      "apiServerCA": "{{b64enc .Values.apiServerCA}}",
      "agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
      "ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
      "bootstrap": {
        "paths": "{{.Values.bootstrap.paths}}",
        "repo": "{{.Values.bootstrap.repo}}",
        "secret": "{{.Values.bootstrap.secret}}",
        "branch": "{{.Values.bootstrap.branch}}",
        "namespace": "{{.Values.bootstrap.namespace}}",
      },
      "webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
      "githubURLPrefix": "{{.Values.githubURLPrefix}}"
    }
@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: fleet-controller
spec:
  selector:
    matchLabels:
      app: fleet-controller
  template:
    metadata:
      labels:
        app: fleet-controller
    spec:
      containers:
      - env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
        name: fleet-controller
        imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
      serviceAccountName: fleet-controller
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -0,0 +1,106 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fleet-controller
rules:
- apiGroups:
  - gitjob.cattle.io
  resources:
  - '*'
  verbs:
  - '*'
- apiGroups:
  - fleet.cattle.io
  resources:
  - '*'
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - namespaces
  - serviceaccounts
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - secrets
  - configmaps
  verbs:
  - '*'
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterroles
  - clusterrolebindings
  - roles
  - rolebindings
  verbs:
  - '*'

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fleet-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: fleet-controller
subjects:
- kind: ServiceAccount
  name: fleet-controller
  namespace: {{.Release.Namespace}}

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: fleet-controller
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - '*'

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: fleet-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: fleet-controller
subjects:
- kind: ServiceAccount
  name: fleet-controller

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fleet-controller-bootstrap
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fleet-controller-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
  name: fleet-controller-bootstrap
  namespace: {{.Release.Namespace}}
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fleet-controller

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fleet-controller-bootstrap
@@ -0,0 +1,47 @@
image:
  repository: rancher/fleet
  tag: v0.3.4
  imagePullPolicy: IfNotPresent

agentImage:
  repository: rancher/fleet-agent
  tag: v0.3.4
  imagePullPolicy: IfNotPresent

# For cluster registration the public URL of the Kubernetes API server must be set here
# Example: https://example.com:6443
apiServerURL: ""

# For cluster registration the PEM-encoded value of the CA of the Kubernetes API server must be set here
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""

# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"

# Whether to allow clusters to specify their own labels upon registration.
ignoreClusterRegistrationLabels: false

bootstrap:
  # The namespace that will be autocreated and the local cluster will be registered in
  namespace: fleet-local
  # A repo to add at install time that will deploy to the local cluster. This allows
  # one to fully bootstrap fleet, its configuration and all its downstream clusters
  # in one shot.
  repo: ""
  secret: ""
  branch: master
  paths: ""

global:
  cattle:
    systemDefaultRegistry: ""

nodeSelector:
  kubernetes.io/os: linux

tolerations:
- key: cattle.io/os
  operator: "Equal"
  value: "linux"
  effect: NoSchedule
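A minimal sketch of a bootstrap override for these values, pointing the local cluster at a hypothetical GitOps repo (the repo URL, branch and paths are placeholders, not part of the chart):

# hypothetical fleet values override
bootstrap:
  repo: "https://github.com/example/fleet-bootstrap"
  branch: main
  paths: "clusters/local"
  secret: ""   # name of an optional git credentials secret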
@@ -0,0 +1,10 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: longhorn-system
  catalog.cattle.io/release-name: longhorn-crd
apiVersion: v1
description: Installs the CRDs for longhorn.
name: longhorn-crd
type: application
version: 1.1.001
@@ -0,0 +1,2 @@
# longhorn-crd
A Rancher chart that installs the CRDs used by longhorn.
@@ -0,0 +1,420 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: Engine
  name: engines.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Engine
    listKind: EngineList
    plural: engines
    shortNames:
    - lhe
    singular: engine
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: The current state of the engine
      jsonPath: .status.currentState
    - name: Node
      type: string
      description: The node that the engine is on
      jsonPath: .spec.nodeID
    - name: InstanceManager
      type: string
      description: The instance manager of the engine
      jsonPath: .status.instanceManagerName
    - name: Image
      type: string
      description: The current image of the engine
      jsonPath: .status.currentImage
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: Replica
  name: replicas.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Replica
    listKind: ReplicaList
    plural: replicas
    shortNames:
    - lhr
    singular: replica
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: The current state of the replica
      jsonPath: .status.currentState
    - name: Node
      type: string
      description: The node that the replica is on
      jsonPath: .spec.nodeID
    - name: Disk
      type: string
      description: The disk that the replica is on
      jsonPath: .spec.diskID
    - name: InstanceManager
      type: string
      description: The instance manager of the replica
      jsonPath: .status.instanceManagerName
    - name: Image
      type: string
      description: The current image of the replica
      jsonPath: .status.currentImage
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: Setting
  name: settings.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Setting
    listKind: SettingList
    plural: settings
    shortNames:
    - lhs
    singular: setting
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        x-kubernetes-preserve-unknown-fields: true
    additionalPrinterColumns:
    - name: Value
      type: string
      description: The value of the setting
      jsonPath: .value
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: Volume
  name: volumes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    shortNames:
    - lhv
    singular: volume
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: The state of the volume
      jsonPath: .status.state
    - name: Robustness
      type: string
      description: The robustness of the volume
      jsonPath: .status.robustness
    - name: Scheduled
      type: string
      description: The scheduled condition of the volume
      jsonPath: .status.conditions['scheduled']['status']
    - name: Size
      type: string
      description: The size of the volume
      jsonPath: .spec.size
    - name: Node
      type: string
      description: The node that the volume is currently attaching to
      jsonPath: .status.currentNodeID
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: EngineImage
  name: engineimages.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: EngineImage
    listKind: EngineImageList
    plural: engineimages
    shortNames:
    - lhei
    singular: engineimage
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: State of the engine image
      jsonPath: .status.state
    - name: Image
      type: string
      description: The Longhorn engine image
      jsonPath: .spec.image
    - name: RefCount
      type: integer
      description: Number of volumes are using the engine image
      jsonPath: .status.refCount
    - name: BuildDate
      type: date
      description: The build date of the engine image
      jsonPath: .status.buildDate
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: Node
  name: nodes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Node
    listKind: NodeList
    plural: nodes
    shortNames:
    - lhn
    singular: node
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: Ready
      type: string
      description: Indicate whether the node is ready
      jsonPath: .status.conditions['Ready']['status']
    - name: AllowScheduling
      type: boolean
      description: Indicate whether the user disabled/enabled replica scheduling for the node
      jsonPath: .spec.allowScheduling
    - name: Schedulable
      type: string
      description: Indicate whether Longhorn can schedule replicas on the node
      jsonPath: .status.conditions['Schedulable']['status']
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: InstanceManager
  name: instancemanagers.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: InstanceManager
    listKind: InstanceManagerList
    plural: instancemanagers
    shortNames:
    - lhim
    singular: instancemanager
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: The state of the instance manager
      jsonPath: .status.currentState
    - name: Type
      type: string
      description: The type of the instance manager (engine or replica)
      jsonPath: .spec.type
    - name: Node
      type: string
      description: The node that the instance manager is running on
      jsonPath: .spec.nodeID
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.0
    longhorn-manager: ShareManager
  name: sharemanagers.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: ShareManager
    listKind: ShareManagerList
    plural: sharemanagers
    shortNames:
    - lhsm
    singular: sharemanager
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      description: The state of the share manager
      jsonPath: .status.state
    - name: Node
      type: string
      description: The node that the share manager is owned by
      jsonPath: .status.ownerID
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
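Because these CRDs leave spec and status open-ended (x-kubernetes-preserve-unknown-fields), the schema itself does not show what a resource looks like. A small hypothetical Volume object for illustration only; spec.size follows the Size printer column above, and numberOfReplicas is an assumed field from common Longhorn usage, not something this schema defines:

apiVersion: longhorn.io/v1beta1
kind: Volume
metadata:
  name: example-vol          # placeholder name
  namespace: longhorn-system
spec:
  size: "2147483648"         # bytes, surfaced by the Size column (.spec.size)
  numberOfReplicas: 3        # assumed field, for illustration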
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -0,0 +1,37 @@
annotations:
  catalog.cattle.io/auto-install: longhorn-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Longhorn
  catalog.cattle.io/namespace: longhorn-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
  catalog.cattle.io/release-name: longhorn
  catalog.cattle.io/ui-component: longhorn
apiVersion: v1
appVersion: v1.1.0
description: Longhorn is a distributed block storage system for Kubernetes.
home: https://github.com/longhorn/longhorn
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.svg?sanitize=true
keywords:
- longhorn
- storage
- distributed
- block
- device
- iscsi
kubeVersion: '>=v1.16.0-r0'
maintainers:
- email: maintainers@longhorn.io
  name: Longhorn maintainers
- email: sheng@yasker.org
  name: Sheng Yang
name: longhorn
sources:
- https://github.com/longhorn/longhorn
- https://github.com/longhorn/longhorn-engine
- https://github.com/longhorn/longhorn-instance-manager
- https://github.com/longhorn/longhorn-share-manager
- https://github.com/longhorn/longhorn-manager
- https://github.com/longhorn/longhorn-ui
- https://github.com/longhorn/longhorn-tests
version: 1.1.001
@@ -0,0 +1,32 @@
# Longhorn Chart

> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.

> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.

## Source Code

Longhorn is 100% open source software. Project source code is spread across a number of repos:

1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
5. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui

## Prerequisites

1. Docker v1.13+
2. Kubernetes v1.16+
3. Make sure `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.

## Uninstallation

To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads that use Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc).

From the Rancher Cluster Explorer UI, navigate to the Apps page and delete the app `longhorn`, then the app `longhorn-crd`, in the Installed Apps tab.


---
Please see [link](https://github.com/longhorn/longhorn) for more information.
@@ -0,0 +1,11 @@
# Longhorn

Longhorn is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.

Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!

**Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.

**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.

[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
@@ -0,0 +1,512 @@
categories:
- storage
namespace: longhorn-system
questions:
- variable: image.defaultImage
  default: "true"
  description: "Use default Longhorn images"
  label: Use Default Images
  type: boolean
  show_subquestion_if: false
  group: "Longhorn Images"
  subquestions:
  - variable: image.longhorn.manager.repository
    default: rancher/longhornio-longhorn-manager
    description: "Specify Longhorn Manager Image Repository"
    type: string
    label: Longhorn Manager Image Repository
    group: "Longhorn Images Settings"
  - variable: image.longhorn.manager.tag
    default: v1.1.0
    description: "Specify Longhorn Manager Image Tag"
    type: string
    label: Longhorn Manager Image Tag
    group: "Longhorn Images Settings"
  - variable: image.longhorn.engine.repository
    default: rancher/longhornio-longhorn-engine
    description: "Specify Longhorn Engine Image Repository"
    type: string
    label: Longhorn Engine Image Repository
    group: "Longhorn Images Settings"
  - variable: image.longhorn.engine.tag
    default: v1.1.0
    description: "Specify Longhorn Engine Image Tag"
    type: string
    label: Longhorn Engine Image Tag
    group: "Longhorn Images Settings"
  - variable: image.longhorn.ui.repository
    default: rancher/longhornio-longhorn-ui
    description: "Specify Longhorn UI Image Repository"
    type: string
    label: Longhorn UI Image Repository
    group: "Longhorn Images Settings"
  - variable: image.longhorn.ui.tag
    default: v1.1.0
    description: "Specify Longhorn UI Image Tag"
    type: string
    label: Longhorn UI Image Tag
    group: "Longhorn Images Settings"
  - variable: image.longhorn.instanceManager.repository
    default: rancher/longhornio-longhorn-instance-manager
    description: "Specify Longhorn Instance Manager Image Repository"
    type: string
    label: Longhorn Instance Manager Image Repository
    group: "Longhorn Images Settings"
  - variable: image.longhorn.instanceManager.tag
    default: v1_20201216
    description: "Specify Longhorn Instance Manager Image Tag"
    type: string
    label: Longhorn Instance Manager Image Tag
    group: "Longhorn Images Settings"
  - variable: image.longhorn.shareManager.repository
    default: rancher/longhornio-longhorn-share-manager
    description: "Specify Longhorn Share Manager Image Repository"
    type: string
    label: Longhorn Share Manager Image Repository
    group: "Longhorn Images Settings"
  - variable: image.longhorn.shareManager.tag
    default: v1_20201204
    description: "Specify Longhorn Share Manager Image Tag"
    type: string
    label: Longhorn Share Manager Image Tag
    group: "Longhorn Images Settings"
  - variable: image.csi.attacher.repository
    default: rancher/longhornio-csi-attacher
    description: "Specify CSI attacher image repository. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Attacher Image Repository
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.attacher.tag
    default: v2.2.1-lh1
    description: "Specify CSI attacher image tag. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Attacher Image Tag
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.provisioner.repository
    default: rancher/longhornio-csi-provisioner
    description: "Specify CSI provisioner image repository. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Provisioner Image Repository
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.provisioner.tag
    default: v1.6.0-lh1
    description: "Specify CSI provisioner image tag. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Provisioner Image Tag
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.nodeDriverRegistrar.repository
    default: rancher/longhornio-csi-node-driver-registrar
    description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Node Driver Registrar Image Repository
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.nodeDriverRegistrar.tag
    default: v1.2.0-lh1
    description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Node Driver Registrar Image Tag
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.resizer.repository
    default: rancher/longhornio-csi-resizer
    description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Driver Resizer Image Repository
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.resizer.tag
    default: v0.5.1-lh1
    description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Driver Resizer Image Tag
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.snapshotter.repository
    default: rancher/longhornio-csi-snapshotter
    description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Driver Snapshotter Image Repository
    group: "Longhorn CSI Driver Images"
  - variable: image.csi.snapshotter.tag
    default: v2.1.1-lh1
    description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
    type: string
    label: Longhorn CSI Driver Snapshotter Image Tag
    group: "Longhorn CSI Driver Images"
- variable: privateRegistry.registryUrl
  label: Private registry URL
  description: "URL of private registry. Leave blank to apply system default registry."
  group: "Private Registry Settings"
  type: string
  default: ""
- variable: privateRegistry.registryUser
  label: Private registry user
  description: "User used to authenticate to private registry"
  group: "Private Registry Settings"
  type: string
  default: ""
- variable: privateRegistry.registryPasswd
  label: Private registry password
  description: "Password used to authenticate to private registry"
  group: "Private Registry Settings"
  type: password
  default: ""
- variable: privateRegistry.registrySecret
  label: Private registry secret name
  description: "Longhorn will automatically generate a Kubernetes secret with this name and use it to pull images from your private registry."
  group: "Private Registry Settings"
  type: string
  default: ""
- variable: longhorn.default_setting
  default: "false"
  description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
  label: "Customize Default Settings"
  type: boolean
  show_subquestion_if: true
  group: "Longhorn Default Settings"
  subquestions:
  - variable: csi.kubeletRootDir
    default:
    description: "Specify kubelet root-dir. Leave blank to autodetect."
    type: string
    label: Kubelet Root Directory
    group: "Longhorn CSI Driver Settings"
  - variable: csi.attacherReplicaCount
    type: int
    default: 3
    min: 1
    max: 10
    description: "Specify replica count of CSI Attacher. By default 3."
    label: Longhorn CSI Attacher replica count
    group: "Longhorn CSI Driver Settings"
  - variable: csi.provisionerReplicaCount
    type: int
    default: 3
    min: 1
    max: 10
    description: "Specify replica count of CSI Provisioner. By default 3."
    label: Longhorn CSI Provisioner replica count
    group: "Longhorn CSI Driver Settings"
  - variable: csi.resizerReplicaCount
    type: int
    default: 3
    min: 1
    max: 10
    description: "Specify replica count of CSI Resizer. By default 3."
    label: Longhorn CSI Resizer replica count
    group: "Longhorn CSI Driver Settings"
  - variable: csi.snapshotterReplicaCount
    type: int
    default: 3
    min: 1
    max: 10
    description: "Specify replica count of CSI Snapshotter. By default 3."
    label: Longhorn CSI Snapshotter replica count
    group: "Longhorn CSI Driver Settings"
  - variable: defaultSettings.backupTarget
    label: Backup Target
    description: "The endpoint used to access the backupstore. NFS and S3 are supported."
    group: "Longhorn Default Settings"
    type: string
    default:
  - variable: defaultSettings.backupTargetCredentialSecret
    label: Backup Target Credential Secret
    description: "The name of the Kubernetes secret associated with the backup target."
    group: "Longhorn Default Settings"
    type: string
    default:
  - variable: defaultSettings.allowRecurringJobWhileVolumeDetached
    label: Allow Recurring Job While Volume Is Detached
    description: 'If this setting is enabled, Longhorn will automatically attach the volume and take a snapshot/backup when it is time to do a recurring snapshot/backup.
      Note that the volume is not ready for the workload during the period when the volume is automatically attached. The workload will have to wait until the recurring job finishes.'
    group: "Longhorn Default Settings"
    type: boolean
    default: "false"
  - variable: defaultSettings.createDefaultDiskLabeledNodes
    label: Create Default Disk on Labeled Nodes
    description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
    group: "Longhorn Default Settings"
    type: boolean
    default: "false"
  - variable: defaultSettings.defaultDataPath
    label: Default Data Path
    description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
    group: "Longhorn Default Settings"
    type: string
    default: "/var/lib/longhorn/"
  - variable: defaultSettings.defaultDataLocality
    label: Default Data Locality
    description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
      This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass.
      The available modes are:
      - **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
      - **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
    group: "Longhorn Default Settings"
    type: enum
    options:
    - "disabled"
    - "best-effort"
    default: "disabled"
  - variable: defaultSettings.replicaSoftAntiAffinity
    label: Replica Node Level Soft Anti-Affinity
    description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
    group: "Longhorn Default Settings"
    type: boolean
    default: "false"
  - variable: defaultSettings.storageOverProvisioningPercentage
    label: Storage Over Provisioning Percentage
    description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
    group: "Longhorn Default Settings"
    type: int
    min: 0
    default: 200
  - variable: defaultSettings.storageMinimalAvailablePercentage
    label: Storage Minimal Available Percentage
    description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
    group: "Longhorn Default Settings"
    type: int
    min: 0
    max: 100
    default: 25
  - variable: defaultSettings.upgradeChecker
    label: Enable Upgrade Checker
    description: 'Upgrade Checker will check for a new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
    group: "Longhorn Default Settings"
    type: boolean
    default: "true"
  - variable: defaultSettings.defaultReplicaCount
    label: Default Replica Count
    description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
    group: "Longhorn Default Settings"
    type: int
    min: 1
    max: 20
    default: 3
- variable: defaultSettings.guaranteedEngineCPU
|
||||
label: Guaranteed Engine CPU
|
||||
description: "Allow Longhorn Instance Managers to have guaranteed CPU allocation. By default 0.25. The value is how many CPUs should be reserved for each Engine/Replica Instance Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Instance Manager Pods created after the setting took effect.
|
||||
In order to prevent unexpected volume crash, you can use the following formula to calculate an appropriate value for this setting:
|
||||
'Guaranteed Engine CPU = The estimated max Longhorn volume/replica count on a node * 0.1'.
|
||||
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
|
||||
If it's hard to estimate the volume/replica count now, you can leave it with the default value, or allocate 1/8 of total CPU of a node. Then you can tune it when there is no running workload using Longhorn volumes.
|
||||
WARNING: After this setting is changed, all the instance managers on all the nodes will be automatically restarted
|
||||
WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: float
|
||||
default: 0.25
|
||||
- variable: defaultSettings.defaultLonghornStaticStorageClass
|
||||
label: Default Longhorn Static StorageClass Name
|
||||
description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "longhorn-static"
|
||||
- variable: defaultSettings.backupstorePollInterval
|
||||
label: Backupstore Poll Interval
|
||||
description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 300
|
||||
- variable: defaultSettings.taintToleration
|
||||
label: Kubernetes Taint Toleration
|
||||
description: "To dedicate nodes to store Longhorn replicas and reject other general workloads, set tolerations for Longhorn and add taints for the storage nodes.
|
||||
All Longhorn volumes should be detached before modifying toleration settings.
|
||||
We recommend setting tolerations during Longhorn deployment because the Longhorn system cannot be operated during the update.
|
||||
Multiple tolerations can be set here, and these tolerations are separated by semicolon. For example:
|
||||
* `key1=value1:NoSchedule; key2:NoExecute`
|
||||
* `:` this toleration tolerates everything because an empty key with operator `Exists` matches all keys, values and effects
|
||||
* `key1=value1:` this toleration has empty effect. It matches all effects with key `key1`
|
||||
Because `kubernetes.io` is used as the key of all Kubernetes default tolerations, it should not be used in the toleration settings.
|
||||
WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES!"
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: defaultSettings.priorityClass
|
||||
label: Priority Class
|
||||
description: "The name of the Priority Class to set on the Longhorn workloads. This can help prevent Longhorn workloads from being evicted under Node Pressure. WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: defaultSettings.autoSalvage
|
||||
label: Automatic salvage
|
||||
description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
|
||||
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
|
||||
description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
|
||||
If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
|
||||
**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.disableSchedulingOnCordonedNode
|
||||
label: Disable Scheduling On Cordoned Node
|
||||
description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.replicaZoneSoftAntiAffinity
|
||||
label: Replica Zone Level Soft Anti-Affinity
|
||||
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.volumeAttachmentRecoveryPolicy
|
||||
label: Volume Attachment Recovery Policy
|
||||
description: "Defines the Longhorn action when a Volume is stuck with a Deployment Pod on a failed node. `wait` leads to the deletion of the volume attachment as soon as the pods deletion time has passed. `never` is the default Kubernetes behavior of never deleting volume attachments on terminating pods. `immediate` leads to the deletion of the volume attachment as soon as all workload pods are pending. By default wait."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "wait"
|
||||
- "never"
|
||||
- "immediate"
|
||||
default: "wait"
|
||||
- variable: defaultSettings.nodeDownPodDeletionPolicy
|
||||
label: Pod Deletion Policy When Node is Down
|
||||
description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
|
||||
- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
|
||||
- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "do-nothing"
|
||||
- "delete-statefulset-pod"
|
||||
- "delete-deployment-pod"
|
||||
- "delete-both-statefulset-and-deployment-pod"
|
||||
default: "do-nothing"
|
||||
- variable: defaultSettings.allowNodeDrainWithLastHealthyReplica
|
||||
label: Allow Node Drain with the Last Healthy Replica
|
||||
description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.
|
||||
If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.mkfsExt4Parameters
|
||||
label: Custom mkfs.ext4 parameters
|
||||
description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
- variable: defaultSettings.disableReplicaRebuild
|
||||
label: Disable Replica Rebuild
|
||||
description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.replicaReplenishmentWaitInterval
|
||||
label: Replica Replenishment Wait Interval
|
||||
description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
|
||||
Warning: This option takes effect only when the volume has a failed replica, and it may block rebuilding for a while in that case."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 600
|
||||
- variable: defaultSettings.disableRevisionCounter
|
||||
label: Disable Revision Counter
|
||||
description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.systemManagedPodsImagePullPolicy
|
||||
label: System Managed Pod Image Pull Policy
|
||||
description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "if-not-present"
|
||||
- "always"
|
||||
- "never"
|
||||
default: "if-not-present"
|
||||
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
|
||||
label: Allow Volume Creation with Degraded Availability
|
||||
description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
|
||||
label: Automatically Cleanup System Generated Snapshot
|
||||
description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: persistence.defaultClass
|
||||
default: "true"
|
||||
description: "Set as default StorageClass for Longhorn"
|
||||
label: Default Storage Class
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: boolean
|
||||
- variable: persistence.reclaimPolicy
|
||||
label: Storage Class Retain Policy
|
||||
description: "Define reclaim policy (Retain or Delete)"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: enum
|
||||
options:
|
||||
- "Delete"
|
||||
- "Retain"
|
||||
default: "Delete"
|
||||
- variable: persistence.defaultClassReplicaCount
|
||||
description: "Set replica count for Longhorn StorageClass"
|
||||
label: Default Storage Class Replica Count
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: int
|
||||
min: 1
|
||||
max: 10
|
||||
default: 3
|
||||
- variable: persistence.recurringJobs.enable
|
||||
description: "Enable recurring job for Longhorn StorageClass"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
label: Enable Storage Class Recurring Job
|
||||
type: boolean
|
||||
default: false
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: persistence.recurringJobs.jobList
|
||||
description: 'Recurring job list for Longhorn StorageClass. Please be careful with the quoting of the input. e.g., [{"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,"labels": {"interval":"2m"}}]'
|
||||
label: Storage Class Recurring Job List
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: ingress.enabled
|
||||
default: "false"
|
||||
description: "Expose app using Layer 7 Load Balancer - ingress"
|
||||
type: boolean
|
||||
group: "Services and Load Balancing"
|
||||
label: Expose app using Layer 7 Load Balancer
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: ingress.host
|
||||
default: "xip.io"
|
||||
description: "layer 7 Load Balancer hostname"
|
||||
type: hostname
|
||||
required: true
|
||||
label: Layer 7 Load Balancer Hostname
|
||||
- variable: service.ui.type
|
||||
default: "Rancher-Proxy"
|
||||
description: "Define Longhorn UI service type"
|
||||
type: enum
|
||||
options:
|
||||
- "ClusterIP"
|
||||
- "NodePort"
|
||||
- "LoadBalancer"
|
||||
- "Rancher-Proxy"
|
||||
label: Longhorn UI Service
|
||||
show_if: "ingress.enabled=false"
|
||||
group: "Services and Load Balancing"
|
||||
show_subquestion_if: "NodePort"
|
||||
subquestions:
|
||||
- variable: service.ui.nodePort
|
||||
default: ""
|
||||
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
|
||||
type: int
|
||||
min: 30000
|
||||
max: 32767
|
||||
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
|
||||
label: UI Service NodePort number
|
||||
- variable: enablePSP
|
||||
default: "true"
|
||||
description: "Setup a pod security policy for Longhorn workloads."
|
||||
label: Pod Security Policy
|
||||
type: boolean
|
||||
group: "Other Settings"
|
|
@ -0,0 +1,5 @@
|
|||
Longhorn is now installed on the cluster!
|
||||
|
||||
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
|
||||
|
||||
Visit our documentation at https://longhorn.io/docs/
|
|
@ -0,0 +1,66 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "longhorn.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "longhorn.fullname" -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "longhorn.managerIP" -}}
|
||||
{{- $fullname := (include "longhorn.fullname" .) -}}
|
||||
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "secret" }}
|
||||
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
|
||||
{{- end }}
|
||||
|
||||
{{- /*
|
||||
longhorn.labels generates the standard Helm labels.
|
||||
*/ -}}
|
||||
{{- define "longhorn.labels" -}}
|
||||
app.kubernetes.io/name: {{ template "longhorn.name" . }}
|
||||
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- else -}}
|
||||
{{- "" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "registry_url" -}}
|
||||
{{- if .Values.privateRegistry.registryUrl -}}
|
||||
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
|
||||
{{- else -}}
|
||||
{{ include "system_default_registry" . }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- /*
|
||||
define the longhorn release namespace
|
||||
*/ -}}
|
||||
{{- define "release_namespace" -}}
|
||||
{{- if .Values.namespaceOverride -}}
|
||||
{{- .Values.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: longhorn-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- "*"
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets", "statefulsets", "deployments"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["watch", "list"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["longhorn.io"]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: longhorn-bind
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: longhorn-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
|
@ -0,0 +1,114 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-manager
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-manager
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-manager
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- daemon
|
||||
- --engine-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
|
||||
- --instance-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
|
||||
- --share-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --service-account
|
||||
- longhorn-service-account
|
||||
ports:
|
||||
- containerPort: 9500
|
||||
name: manager
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 9500
|
||||
volumeMounts:
|
||||
- name: dev
|
||||
mountPath: /host/dev/
|
||||
- name: proc
|
||||
mountPath: /host/proc/
|
||||
- name: varrun
|
||||
mountPath: /var/run/
|
||||
mountPropagation: Bidirectional
|
||||
- name: longhorn
|
||||
mountPath: /var/lib/longhorn/
|
||||
mountPropagation: Bidirectional
|
||||
- name: longhorn-default-setting
|
||||
mountPath: /var/lib/longhorn-setting/
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: DEFAULT_SETTING_PATH
|
||||
value: /var/lib/longhorn-setting/default-setting.yaml
|
||||
volumes:
|
||||
- name: dev
|
||||
hostPath:
|
||||
path: /dev/
|
||||
- name: proc
|
||||
hostPath:
|
||||
path: /proc/
|
||||
- name: varrun
|
||||
hostPath:
|
||||
path: /var/run/
|
||||
- name: longhorn
|
||||
hostPath:
|
||||
path: /var/lib/longhorn/
|
||||
- name: longhorn-default-setting
|
||||
configMap:
|
||||
name: longhorn-default-setting
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: "100%"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-backend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.service.manager.type }}
|
||||
sessionAffinity: ClientIP
|
||||
selector:
|
||||
app: longhorn-manager
|
||||
ports:
|
||||
- name: manager
|
||||
port: 9500
|
||||
targetPort: manager
|
||||
{{- if .Values.service.manager.nodePort }}
|
||||
nodePort: {{ .Values.service.manager.nodePort }}
|
||||
{{- end }}
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-default-setting
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
default-setting.yaml: |-
|
||||
backup-target: {{ .Values.defaultSettings.backupTarget }}
|
||||
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
|
||||
allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}
|
||||
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
|
||||
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
|
||||
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
|
||||
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
|
||||
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
|
||||
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
|
||||
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
|
||||
default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}
|
||||
guaranteed-engine-cpu: {{ .Values.defaultSettings.guaranteedEngineCPU }}
|
||||
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
|
||||
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
|
||||
taint-toleration: {{ .Values.defaultSettings.taintToleration }}
|
||||
priority-class: {{ .Values.defaultSettings.priorityClass }}
|
||||
auto-salvage: {{ .Values.defaultSettings.autoSalvage }}
|
||||
auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}
|
||||
disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}
|
||||
replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}
|
||||
volume-attachment-recovery-policy: {{ .Values.defaultSettings.volumeAttachmentRecoveryPolicy }}
|
||||
node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}
|
||||
allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }}
|
||||
mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }}
|
||||
disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }}
|
||||
replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}
|
||||
disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}
|
||||
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}
|
||||
allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}
|
||||
auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}
|
|
@ -0,0 +1,93 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: longhorn-driver-deployer
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-driver-deployer
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-driver-deployer
|
||||
spec:
|
||||
initContainers:
|
||||
- name: wait-longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
|
||||
containers:
|
||||
- name: longhorn-driver-deployer
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- deploy-driver
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --manager-url
|
||||
- http://longhorn-backend:9500/v1
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: SERVICE_ACCOUNT
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.serviceAccountName
|
||||
{{- if .Values.csi.kubeletRootDir }}
|
||||
- name: KUBELET_ROOT_DIR
|
||||
value: {{ .Values.csi.kubeletRootDir }}
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
|
||||
- name: CSI_ATTACHER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
|
||||
- name: CSI_PROVISIONER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
|
||||
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
|
||||
- name: CSI_RESIZER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
|
||||
- name: CSI_SNAPSHOTTER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
|
||||
{{- end }}
|
||||
{{- if .Values.csi.attacherReplicaCount }}
|
||||
- name: CSI_ATTACHER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.attacherReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.provisionerReplicaCount }}
|
||||
- name: CSI_PROVISIONER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.provisionerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.resizerReplicaCount }}
|
||||
- name: CSI_RESIZER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.resizerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.snapshotterReplicaCount }}
|
||||
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
securityContext:
|
||||
runAsUser: 0
|
|
@ -0,0 +1,61 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
name: longhorn-ui
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-ui
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-ui
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-ui
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
env:
|
||||
- name: LONGHORN_MANAGER_IP
|
||||
value: "http://longhorn-backend:9500"
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
kubernetes.io/cluster-service: "true"
|
||||
{{- end }}
|
||||
name: longhorn-frontend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
type: ClusterIP
|
||||
{{- else }}
|
||||
type: {{ .Values.service.ui.type }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: longhorn-ui
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: http
|
||||
{{- if .Values.service.ui.nodePort }}
|
||||
nodePort: {{ .Values.service.ui.nodePort }}
|
||||
{{- else }}
|
||||
nodePort: null
|
||||
{{- end }}
|
|
@ -0,0 +1,31 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: longhorn-ingress
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ingress
|
||||
annotations:
|
||||
{{- if .Values.ingress.tls }}
|
||||
ingress.kubernetes.io/secure-backends: "true"
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.ingress.annotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
spec:
|
||||
rules:
|
||||
- host: {{ .Values.ingress.host }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ default "" .Values.ingress.path }}
|
||||
backend:
|
||||
serviceName: longhorn-frontend
|
||||
servicePort: 80
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
- hosts:
|
||||
- {{ .Values.ingress.host }}
|
||||
secretName: {{ .Values.ingress.tlsSecret }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,35 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
|
||||
name: longhorn-post-upgrade
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-post-upgrade
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-post-upgrade
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- post-upgrade
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
|
@ -0,0 +1,66 @@
|
|||
{{- if .Values.enablePSP }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: longhorn-psp
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: true
|
||||
allowPrivilegeEscalation: true
|
||||
requiredDropCapabilities:
|
||||
- NET_RAW
|
||||
allowedCapabilities:
|
||||
- SYS_ADMIN
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: true
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- configMap
|
||||
- downwardAPI
|
||||
- emptyDir
|
||||
- secret
|
||||
- projected
|
||||
- hostPath
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: longhorn-psp-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
resourceNames:
|
||||
- longhorn-psp
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: longhorn-psp-binding
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: longhorn-psp-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,11 @@
|
|||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Values.privateRegistry.registrySecret }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
data:
|
||||
.dockerconfigjson: {{ template "secret" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
|
@ -0,0 +1,26 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-storageclass
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
storageclass.yaml: |
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: longhorn
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
|
||||
provisioner: driver.longhorn.io
|
||||
allowVolumeExpansion: true
|
||||
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
|
||||
volumeBindingMode: Immediate
|
||||
parameters:
|
||||
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
|
||||
staleReplicaTimeout: "30"
|
||||
fromBackup: ""
|
||||
baseImage: ""
|
||||
{{- if .Values.persistence.recurringJobs.enable }}
|
||||
recurringJobs: '{{ .Values.persistence.recurringJobs.jobList }}'
|
||||
{{- end }}
|
|
@ -0,0 +1,16 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: longhorn
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,36 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
name: longhorn-uninstall
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-uninstall
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-uninstall
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- uninstall
|
||||
- --force
|
||||
env:
|
||||
- name: LONGHORN_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-admin"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-edit"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-view"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status"]
|
||||
verbs: [ "get", "list", "watch" ]
|
|
@ -0,0 +1,21 @@
|
|||
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
|
||||
# {{- $found := dict -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
|
||||
# {{- range .Capabilities.APIVersions -}}
|
||||
# {{- if hasKey $found (toString .) -}}
|
||||
# {{- set $found (toString .) true -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
# {{- range $_, $exists := $found -}}
|
||||
# {{- if (eq $exists false) -}}
|
||||
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
#{{- end -}}
|
|
@ -0,0 +1,162 @@
|
|||
# Default values for longhorn.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
global:
|
||||
cattle:
|
||||
systemDefaultRegistry: ""
|
||||
|
||||
image:
|
||||
longhorn:
|
||||
engine:
|
||||
repository: rancher/mirrored-longhornio-longhorn-engine
|
||||
tag: v1.1.0
|
||||
manager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-manager
|
||||
tag: v1.1.0
|
||||
ui:
|
||||
repository: rancher/mirrored-longhornio-longhorn-ui
|
||||
tag: v1.1.0
|
||||
instanceManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-instance-manager
|
||||
tag: v1_20201216
|
||||
shareManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-share-manager
|
||||
tag: v1_20201204
|
||||
csi:
|
||||
attacher:
|
||||
repository: rancher/mirrored-longhornio-csi-attacher
|
||||
tag: v2.2.1-lh1
|
||||
provisioner:
|
||||
repository: rancher/mirrored-longhornio-csi-provisioner
|
||||
tag: v1.6.0-lh1
|
||||
nodeDriverRegistrar:
|
||||
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
|
||||
tag: v1.2.0-lh1
|
||||
resizer:
|
||||
repository: rancher/mirrored-longhornio-csi-resizer
|
||||
tag: v0.5.1-lh1
|
||||
snapshotter:
|
||||
repository: rancher/mirrored-longhornio-csi-snapshotter
|
||||
tag: v2.1.1-lh1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
ui:
|
||||
type: ClusterIP
|
||||
nodePort: null
|
||||
manager:
|
||||
type: ClusterIP
|
||||
nodePort: ""
|
||||
|
||||
persistence:
|
||||
defaultClass: true
|
||||
defaultClassReplicaCount: 3
|
||||
reclaimPolicy: Delete
|
||||
recurringJobs:
|
||||
enable: false
|
||||
jobList: []
|
||||
|
||||
csi:
|
||||
kubeletRootDir: ~
|
||||
attacherReplicaCount: ~
|
||||
provisionerReplicaCount: ~
|
||||
resizerReplicaCount: ~
|
||||
snapshotterReplicaCount: ~
|
||||
|
||||
defaultSettings:
|
||||
backupTarget: ~
|
||||
backupTargetCredentialSecret: ~
|
||||
allowRecurringJobWhileVolumeDetached: ~
|
||||
createDefaultDiskLabeledNodes: ~
|
||||
defaultDataPath: ~
|
||||
defaultDataLocality: ~
|
||||
replicaSoftAntiAffinity: ~
|
||||
storageOverProvisioningPercentage: ~
|
||||
storageMinimalAvailablePercentage: ~
|
||||
upgradeChecker: ~
|
||||
defaultReplicaCount: ~
|
||||
guaranteedEngineCPU: ~
|
||||
defaultLonghornStaticStorageClass: ~
|
||||
backupstorePollInterval: ~
|
||||
taintToleration: ~
|
||||
priorityClass: ~
|
||||
autoSalvage: ~
|
||||
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
|
||||
disableSchedulingOnCordonedNode: ~
|
||||
replicaZoneSoftAntiAffinity: ~
|
||||
volumeAttachmentRecoveryPolicy: ~
|
||||
nodeDownPodDeletionPolicy: ~
|
||||
allowNodeDrainWithLastHealthyReplica: ~
|
||||
mkfsExt4Parameters: ~
|
||||
disableReplicaRebuild: ~
|
||||
replicaReplenishmentWaitInterval: ~
|
||||
disableRevisionCounter: ~
|
||||
systemManagedPodsImagePullPolicy: ~
|
||||
allowVolumeCreationWithDegradedAvailability: ~
|
||||
autoCleanupSystemGeneratedSnapshot: ~
|
||||
|
||||
privateRegistry:
|
||||
registryUrl: ~
|
||||
registryUser: ~
|
||||
registryPasswd: ~
|
||||
registrySecret: ~
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
#
|
||||
|
||||
ingress:
|
||||
## Set to true to enable ingress record generation
|
||||
enabled: false
|
||||
|
||||
|
||||
host: xip.io
|
||||
|
||||
## Set this to true in order to enable TLS on the ingress record
|
||||
## A side effect of this will be that the backend service will be connected at port 443
|
||||
tls: false
|
||||
|
||||
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
|
||||
tlsSecret: longhorn.local-tls
|
||||
|
||||
## Ingress annotations done as key:value pairs
|
||||
## If you're using kube-lego, you will want to add:
|
||||
## kubernetes.io/tls-acme: true
|
||||
##
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
|
||||
##
|
||||
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
|
||||
annotations:
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: true
|
||||
|
||||
secrets:
|
||||
## If you're providing your own certificates, please use this to add the certificates as secrets
|
||||
## key and certificate should start with -----BEGIN CERTIFICATE----- or
|
||||
## -----BEGIN RSA PRIVATE KEY-----
|
||||
##
|
||||
## name should line up with a tlsSecret set further up
|
||||
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
|
||||
##
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
# - name: longhorn.local-tls
|
||||
# key:
|
||||
# certificate:
|
||||
|
||||
# Configure a pod security policy in the Longhorn namespace to allow privileged pods
|
||||
enablePSP: true
|
||||
|
||||
## Specify an override namespace; this is mainly useful when using longhorn as a sub-chart
|
||||
## and its release namespace is not `longhorn-system`
|
||||
namespaceOverride: ""
|
|
@ -0,0 +1,10 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/hidden: "true"
|
||||
catalog.cattle.io/namespace: cattle-resources-system
|
||||
catalog.cattle.io/release-name: rancher-backup-crd
|
||||
apiVersion: v1
|
||||
description: Installs the CRDs for rancher-backup.
|
||||
name: rancher-backup-crd
|
||||
type: application
|
||||
version: 1.0.301
|
|
@ -0,0 +1,2 @@
|
|||
# rancher-backup-crd
|
||||
A Rancher chart that installs the CRDs used by rancher-backup.
|
|
@ -0,0 +1,119 @@
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: backups.resources.cattle.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.storageLocation
|
||||
name: Location
|
||||
type: string
|
||||
- JSONPath: .status.backupType
|
||||
name: Type
|
||||
type: string
|
||||
- JSONPath: .status.filename
|
||||
name: Latest-Backup
|
||||
type: string
|
||||
- JSONPath: .spec.resourceSetName
|
||||
name: ResourceSet
|
||||
type: string
|
||||
- JSONPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- JSONPath: .status.conditions[?(@.type=="Ready")].message
|
||||
name: Status
|
||||
type: string
|
||||
group: resources.cattle.io
|
||||
names:
|
||||
kind: Backup
|
||||
plural: backups
|
||||
scope: Cluster
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
properties:
|
||||
encryptionConfigSecretName:
|
||||
description: Name of the Secret containing the encryption config
|
||||
type: string
|
||||
resourceSetName:
|
||||
description: Name of the ResourceSet CR to use for backup
|
||||
type: string
|
||||
retentionCount:
|
||||
minimum: 1
|
||||
type: integer
|
||||
schedule:
|
||||
description: Cron schedule for recurring backups
|
||||
example:
|
||||
Descriptors: '@midnight'
|
||||
Standard crontab specs: 0 0 * * *
|
||||
type: string
|
||||
storageLocation:
|
||||
nullable: true
|
||||
properties:
|
||||
s3:
|
||||
nullable: true
|
||||
properties:
|
||||
bucketName:
|
||||
type: string
|
||||
credentialSecretName:
|
||||
type: string
|
||||
credentialSecretNamespace:
|
||||
type: string
|
||||
endpoint:
|
||||
type: string
|
||||
endpointCA:
|
||||
type: string
|
||||
folder:
|
||||
type: string
|
||||
insecureTLSSkipVerify:
|
||||
type: boolean
|
||||
region:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
required:
|
||||
- resourceSetName
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
backupType:
|
||||
type: string
|
||||
conditions:
|
||||
items:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
reason:
|
||||
type: string
|
||||
status:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
nullable: true
|
||||
type: array
|
||||
filename:
|
||||
type: string
|
||||
lastSnapshotTs:
|
||||
type: string
|
||||
nextSnapshotAt:
|
||||
type: string
|
||||
observedGeneration:
|
||||
type: integer
|
||||
storageLocation:
|
||||
type: string
|
||||
summary:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
|
@ -0,0 +1,94 @@
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: resourcesets.resources.cattle.io
|
||||
spec:
|
||||
group: resources.cattle.io
|
||||
names:
|
||||
kind: ResourceSet
|
||||
plural: resourcesets
|
||||
scope: Cluster
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
controllerReferences:
|
||||
items:
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
replicas:
|
||||
type: integer
|
||||
resource:
|
||||
type: string
|
||||
type: object
|
||||
nullable: true
|
||||
type: array
|
||||
resourceSelectors:
|
||||
items:
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kinds:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
kindsRegexp:
|
||||
type: string
|
||||
labelSelectors:
|
||||
nullable: true
|
||||
properties:
|
||||
matchExpressions:
|
||||
items:
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
type: object
|
||||
nullable: true
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
nullable: true
|
||||
type: object
|
||||
type: object
|
||||
namespaceRegexp:
|
||||
type: string
|
||||
namespaces:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
resourceNameRegexp:
|
||||
type: string
|
||||
resourceNames:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
type: object
|
||||
nullable: true
|
||||
required:
|
||||
- apiVersion
|
||||
type: array
|
||||
required:
|
||||
- resourceSelectors
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
|
@ -0,0 +1,102 @@
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: restores.resources.cattle.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.backupSource
|
||||
name: Backup-Source
|
||||
type: string
|
||||
- JSONPath: .spec.backupFilename
|
||||
name: Backup-File
|
||||
type: string
|
||||
- JSONPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- JSONPath: .status.conditions[?(@.type=="Ready")].message
|
||||
name: Status
|
||||
type: string
|
||||
group: resources.cattle.io
|
||||
names:
|
||||
kind: Restore
|
||||
plural: restores
|
||||
scope: Cluster
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
properties:
|
||||
backupFilename:
|
||||
type: string
|
||||
deleteTimeoutSeconds:
|
||||
maximum: 10
|
||||
type: integer
|
||||
encryptionConfigSecretName:
|
||||
type: string
|
||||
prune:
|
||||
nullable: true
|
||||
type: boolean
|
||||
storageLocation:
|
||||
nullable: true
|
||||
properties:
|
||||
s3:
|
||||
nullable: true
|
||||
properties:
|
||||
bucketName:
|
||||
type: string
|
||||
credentialSecretName:
|
||||
type: string
|
||||
credentialSecretNamespace:
|
||||
type: string
|
||||
endpoint:
|
||||
type: string
|
||||
endpointCA:
|
||||
type: string
|
||||
folder:
|
||||
type: string
|
||||
insecureTLSSkipVerify:
|
||||
type: boolean
|
||||
region:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
required:
|
||||
- backupFilename
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
backupSource:
|
||||
type: string
|
||||
conditions:
|
||||
items:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
reason:
|
||||
type: string
|
||||
status:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
nullable: true
|
||||
type: array
|
||||
observedGeneration:
|
||||
type: integer
|
||||
restoreCompletionTs:
|
||||
type: string
|
||||
summary:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
|
@ -0,0 +1,20 @@
|
|||
annotations:
|
||||
catalog.cattle.io/auto-install: rancher-backup-crd=match
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/display-name: Rancher Backups
|
||||
catalog.cattle.io/namespace: cattle-resources-system
|
||||
catalog.cattle.io/os: linux
|
||||
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1
|
||||
catalog.cattle.io/release-name: rancher-backup
|
||||
catalog.cattle.io/scope: management
|
||||
catalog.cattle.io/ui-component: rancher-backup
|
||||
apiVersion: v1
|
||||
appVersion: v1.0.3
|
||||
description: Provides ability to back up and restore the Rancher application running
|
||||
on any Kubernetes cluster
|
||||
icon: https://charts.rancher.io/assets/logos/backup-restore.svg
|
||||
keywords:
|
||||
- applications
|
||||
- infrastructure
|
||||
name: rancher-backup
|
||||
version: 1.0.301
|
|
@ -0,0 +1,69 @@
|
|||
# Rancher Backup
|
||||
|
||||
This chart provides the ability to back up and restore the Rancher application running on any Kubernetes cluster.
|
||||
|
||||
Refer to [this](https://github.com/rancher/backup-restore-operator) repository for implementation details.
|
||||
|
||||
-----
|
||||
|
||||
### Get Repo Info
|
||||
```
|
||||
helm repo add rancher-chart https://charts.rancher.io
|
||||
helm repo update
|
||||
```
|
||||
|
||||
-----
|
||||
|
||||
### Install Chart
|
||||
```
|
||||
helm install rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system --create-namespace
|
||||
helm install rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
|
||||
```
|
||||
|
||||
-----
|
||||
|
||||
### Configuration
|
||||
The following table lists the configurable parameters of the rancher-backup chart and their default values:
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|----------|---------------|-------|
|
||||
| image.repository | Container image repository | rancher/backup-restore-operator |
|
||||
| image.tag | Container image tag | v0.1.0-rc1 |
|
||||
| s3.enabled | Configure an S3-compatible default storage location. The current version supports S3 and MinIO | false |
|
||||
| s3.credentialSecretName | Name of the Secret containing S3 credentials. This is an optional field; skip it in order to use IAM Role authentication. The Secret must contain the following two keys: `accessKey` and `secretKey` | "" |
|
||||
| s3.credentialSecretNamespace | Namespace of the Secret containing S3 credentials. This can be any namespace. | "" |
|
||||
| s3.region | Region of the S3 Bucket (Required for S3, not valid for MinIO) | "" |
|
||||
| s3.bucketName | Name of the Bucket | "" |
|
||||
| s3.folder | Base folder within the Bucket (optional) | "" |
|
||||
| s3.endpoint | Endpoint for the S3 storage provider | "" |
|
||||
| s3.endpointCA | Base64 encoded CA cert for the S3 storage provider (optional) | "" |
|
||||
| s3.insecureTLSSkipVerify | Skip SSL verification | false |
|
||||
| persistence.enabled | Configure a Persistent Volume as the default storage location. It accepts either a StorageClass name to create a PVC, or directly accepts the PV to use. The Persistent Volume is mounted at `/var/lib/backups` in the operator pod | false |
|
||||
| persistence.storageClass | StorageClass to use for dynamically provisioning the Persistent Volume, which will be used for storing backups | "" |
|
||||
| persistence.volumeName | Persistent Volume to use for storing backups | "" |
|
||||
| persistence.size | Requested size of the Persistent Volume (Applicable when using dynamic provisioning) | "" |
|
||||
| nodeSelector | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | {} |
|
||||
| tolerations | https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | [] |
|
||||
| affinity | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity | {} |
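
As an illustration only (these are not defaults shipped with the chart), a values file that points backups at an S3-compatible bucket might look like the sketch below; the bucket, region, and Secret names are placeholders:

```
# example-values.yaml -- placeholder S3 configuration
s3:
  enabled: true
  credentialSecretName: s3-creds            # Secret containing accessKey/secretKey; omit to use an IAM role
  credentialSecretNamespace: cattle-resources-system
  region: us-east-1
  bucketName: rancher-backups
  folder: rancher
  endpoint: s3.us-east-1.amazonaws.com
  insecureTLSSkipVerify: false
persistence:
  enabled: false                            # the chart refuses to configure both S3 and a Persistent Volume
```

Pass such a file with `-f example-values.yaml` to the `helm install` or `helm upgrade` commands shown above.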
|
||||
|
||||
-----
|
||||
|
||||
### CRDs
|
||||
|
||||
Refer to [this](https://github.com/rancher/backup-restore-operator#crds) section for information on the CRDs that this chart installs. Also see [this](https://github.com/rancher/backup-restore-operator/tree/master/examples) folder, which contains sample manifests for the CRDs.
|
||||
|
||||
-----
|
||||
### Upgrading Chart
|
||||
```
|
||||
helm upgrade rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system
|
||||
helm upgrade rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
|
||||
```
|
||||
|
||||
-----
|
||||
### Uninstall Chart
|
||||
|
||||
```
|
||||
helm uninstall rancher-backup -n cattle-resources-system
|
||||
helm uninstall rancher-backup-crd -n cattle-resources-system
|
||||
```
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# Rancher Backup
|
||||
|
||||
This chart provides the ability to capture backups of the Rancher application and to restore from those backups. It can also be used to migrate Rancher from one Kubernetes cluster to another.
|
||||
|
||||
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/backups/v2.5/).
|
||||
|
||||
This chart installs the following components:
|
||||
|
||||
- [backup-restore-operator](https://github.com/rancher/backup-restore-operator)
|
||||
- The operator handles backing up all Kubernetes resources and CRDs that Rancher creates and manages from the local cluster. It gathers these resources by querying the Kubernetes API server, packages all the resources to create a tarball file and saves it in the configured backup storage location.
|
||||
- The operator can be configured to store backups in S3-compatible object stores such as AWS S3 and MinIO, and in persistent volumes. During deployment, you can create a default storage location; each backup can still override the default, but overrides are limited to S3-compatible object stores.
|
||||
- It preserves the ownerReferences on all resources, hence maintaining dependencies between objects.
|
||||
- This operator provides encryption support to encrypt user-specified resources before saving them in the backup file. It uses the same encryption configuration that is used to enable [Kubernetes Encryption at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
- Backup - A backup is a CRD (`Backup`) that defines when to take backups, where to store the backup, and what encryption to use (optional). Backups can be taken ad hoc or scheduled at regular intervals; a minimal example follows this list.
|
||||
- Restore - A restore is a CRD (`Restore`) that defines which backup file to use when restoring the Rancher application.
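
As a rough sketch, a minimal recurring `Backup` object could look like the following; the resource set name, schedule, and retention count are illustrative values, not requirements of this chart:

```
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-rancher-backup             # Backup is cluster-scoped, so no namespace is set
spec:
  resourceSetName: rancher-resource-set    # ResourceSet that selects which resources to back up
  schedule: "@midnight"                    # omit to take a one-time, ad hoc backup
  retentionCount: 10                       # keep only the ten most recent backup files
```

If no storage location is set on the `Backup` itself, the operator falls back to the default storage location configured for the chart.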
|
|
@ -0,0 +1,76 @@
|
|||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- else -}}
|
||||
{{- "" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Windows clusters add a default taint to Linux nodes;
|
||||
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
|
||||
*/}}
|
||||
{{- define "linux-node-tolerations" -}}
|
||||
- key: "cattle.io/os"
|
||||
value: "linux"
|
||||
effect: "NoSchedule"
|
||||
operator: "Equal"
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "backupRestore.fullname" -}}
|
||||
{{- .Chart.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "backupRestore.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "backupRestore.labels" -}}
|
||||
helm.sh/chart: {{ include "backupRestore.chart" . }}
|
||||
{{ include "backupRestore.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "backupRestore.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "backupRestore.fullname" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
resources.cattle.io/operator: backup-restore
|
||||
{{- end }}
|
||||
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "backupRestore.serviceAccountName" -}}
|
||||
{{ include "backupRestore.fullname" . }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
{{- define "backupRestore.s3SecretName" -}}
|
||||
{{- printf "%s-%s" .Chart.Name "s3" | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create PVC name using release and revision number.
|
||||
*/}}
|
||||
{{- define "backupRestore.pvcName" -}}
|
||||
{{- printf "%s-%d" .Release.Name .Release.Revision }}
|
||||
{{- end }}
|
||||
|
|
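
To illustrate the `system_default_registry` helper, a values fragment and the prefix it renders are sketched below; the registry host and image tag are illustrative assumptions:

```
global:
  cattle:
    # When set, the helper renders "registry.example.com/" and every image
    # reference in the chart is prefixed with it, e.g.
    #   image: registry.example.com/rancher/backup-restore-operator:v1.0.4
    systemDefaultRegistry: "registry.example.com"
```
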
@ -0,0 +1,14 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "backupRestore.fullname" . }}
  labels:
    {{- include "backupRestore.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
  name: {{ include "backupRestore.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,59 @@
{{- if and .Values.s3.enabled .Values.persistence.enabled }}
{{- fail "\n\nCannot configure both s3 and PV for storing backups" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "backupRestore.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "backupRestore.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "backupRestore.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "backupRestore.selectorLabels" . | nindent 8 }}
      annotations:
        checksum/s3: {{ include (print $.Template.BasePath "/s3-secret.yaml") . | sha256sum }}
        checksum/pvc: {{ include (print $.Template.BasePath "/pvc.yaml") . | sha256sum }}
    spec:
      serviceAccountName: {{ include "backupRestore.serviceAccountName" . }}
      containers:
      - name: {{ .Chart.Name }}
        image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: Always
        env:
        - name: CHART_NAMESPACE
          value: {{ .Release.Namespace }}
{{- if .Values.s3.enabled }}
        - name: DEFAULT_S3_BACKUP_STORAGE_LOCATION
          value: {{ include "backupRestore.s3SecretName" . }}
{{- end }}
{{- if .Values.persistence.enabled }}
        - name: DEFAULT_PERSISTENCE_ENABLED
          value: "persistence-enabled"
        volumeMounts:
        - mountPath: "/var/lib/backups"
          name: pv-storage
      volumes:
      - name: pv-storage
        persistentVolumeClaim:
          claimName: {{ include "backupRestore.pvcName" . }}
{{- end }}
      nodeSelector:
        kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
      affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
      tolerations:
{{- include "linux-node-tolerations" . | nindent 8}}
{{- with .Values.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}

@ -0,0 +1,27 @@
{{- if and .Values.persistence.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "backupRestore.pvcName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "backupRestore.labels" . | nindent 4 }}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
{{- with .Values.persistence }}
    requests:
      storage: {{ .size | quote }}
{{- if .storageClass }}
{{- if (eq "-" .storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: {{ .storageClass | quote }}
{{- end }}
{{- end }}
{{- if .volumeName }}
  volumeName: {{ .volumeName | quote }}
{{- end }}
{{- end }}
{{- end }}
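
The persistence values consumed by this template might be set roughly as below; the keys come from the template above, while the size, storage class, and volume name are illustrative. Note that the deployment refuses to render when both `persistence.enabled` and `s3.enabled` are true:

```
persistence:
  enabled: true
  size: 2Gi
  # Use "-" to emit an empty storageClassName and disable dynamic provisioning.
  storageClass: "local-path"
  # Optionally bind to a pre-created PersistentVolume.
  volumeName: ""
```
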
@ -0,0 +1,62 @@
apiVersion: resources.cattle.io/v1
kind: ResourceSet
metadata:
  name: rancher-resource-set
resourceSelectors:
- apiVersion: "v1"
  kindsRegexp: "^namespaces$"
  resourceNameRegexp: "^cattle-|^p-|^c-|^user-|^u-"
  resourceNames:
  - "local"
- apiVersion: "v1"
  kindsRegexp: "^secrets$"
  namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
  labelSelectors:
    matchExpressions:
    - key: "owner"
      operator: "NotIn"
      values: ["helm"]
- apiVersion: "v1"
  kindsRegexp: "^serviceaccounts$"
  namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
- apiVersion: "v1"
  kindsRegexp: "^configmaps$"
  namespaces:
  - "cattle-system"
- apiVersion: "rbac.authorization.k8s.io/v1"
  kindsRegexp: "^roles$|^rolebindings$"
  namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
- apiVersion: "rbac.authorization.k8s.io/v1"
  kindsRegexp: "^clusterrolebindings$"
  resourceNameRegexp: "^cattle-|^clusterrolebinding-|^globaladmin-user-|^grb-u-"
  resourceNames:
  - "eks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
  kindsRegexp: "^clusterroles$"
  resourceNameRegexp: "^cattle-|^p-|^c-|^local-|^user-|^u-|^project-|^create-ns$"
  resourceNames:
  - "eks-operator"
- apiVersion: "apiextensions.k8s.io/v1beta1"
  kindsRegexp: "."
  resourceNameRegexp: "management.cattle.io$|project.cattle.io$|catalog.cattle.io$|eks.cattle.io$|resources.cattle.io$"
- apiVersion: "management.cattle.io/v3"
  kindsRegexp: "."
- apiVersion: "project.cattle.io/v3"
  kindsRegexp: "."
- apiVersion: "catalog.cattle.io/v1"
  kindsRegexp: "^clusterrepos$"
- apiVersion: "resources.cattle.io/v1"
  kindsRegexp: "^ResourceSet$"
- apiVersion: "eks.cattle.io/v1"
  kindsRegexp: "."
- apiVersion: "apps/v1"
  kindsRegexp: "^deployments$"
  resourceNames:
  - "eks-config-operator"
  namespaces:
  - "cattle-system"
controllerReferences:
- apiVersion: "apps/v1"
  resource: "deployments"
  name: "rancher"
  namespace: "cattle-system"

@ -0,0 +1,31 @@
{{- if .Values.s3.enabled -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "backupRestore.s3SecretName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "backupRestore.labels" . | nindent 4 }}
type: Opaque
stringData:
  {{- with .Values.s3 }}
  {{- if .credentialSecretName }}
  credentialSecretName: {{ .credentialSecretName }}
  credentialSecretNamespace: {{ required "When providing a Secret containing S3 credentials, a valid .Values.credentialSecretNamespace must be provided" .credentialSecretNamespace }}
  {{- end }}
  {{- if .region }}
  region: {{ .region }}
  {{- end }}
  bucketName: {{ required "A valid .Values.bucketName is required for configuring S3 compatible storage as the default backup storage location" .bucketName }}
  {{- if .folder }}
  folder: {{ .folder }}
  {{- end }}
  endpoint: {{ required "A valid .Values.endpoint is required for configuring S3 compatible storage as the default backup storage location" .endpoint }}
  {{- if .endpointCA }}
  endpointCA: {{ .endpointCA }}
  {{- end }}
  {{- if .insecureTLSSkipVerify }}
  insecureTLSSkipVerify: {{ .insecureTLSSkipVerify | quote }}
  {{- end }}
  {{- end }}
{{ end }}
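
The corresponding S3 values might look like the sketch below; the keys mirror the template above, and the secret, bucket, and endpoint names are illustrative. As with persistence, enable only one default storage backend:

```
s3:
  enabled: true
  # Existing Secret holding the S3 credentials (name and namespace are illustrative).
  credentialSecretName: "s3-creds"
  credentialSecretNamespace: "cattle-resources-system"
  region: "us-east-1"
  bucketName: "rancher-backups"
  folder: "rancher"
  endpoint: "s3.us-east-1.amazonaws.com"
  # endpointCA: "<base64-encoded CA bundle>"
  insecureTLSSkipVerify: false
```
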
@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "backupRestore.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "backupRestore.labels" . | nindent 4 }}

@ -0,0 +1,16 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "resources.cattle.io/v1/Backup" false -}}
# {{- set $found "resources.cattle.io/v1/ResourceSet" false -}}
# {{- set $found "resources.cattle.io/v1/Restore" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}