Merge pull request #3274 from rancher/oob-v2.8

[release-v2.8] Releasing v2.7 forward-ported charts into v2.8
pull/3353/head
Lucas Machado 2023-12-18 13:47:10 -03:00 committed by GitHub
commit cb953fafd4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
144 changed files with 25967 additions and 2 deletions

7 binary files not shown.

@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.27.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.8.1
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 102.2.1+up0.8.1

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
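For standalone use, a minimal agent-initiated registration sketch looks like this. The value names (`apiServerURL`, `apiServerCA`, `token`, `clusterNamespace`) come from this chart's values.yaml; the URL, CA file, token, and namespace shown are placeholders to replace:

```
helm repo add fleet https://rancher.github.io/fleet-helm-charts/
helm -n cattle-fleet-system install --create-namespace --wait \
  --set apiServerURL="https://fleet.example.com:6443" \
  --set-file apiServerCA=ca.pem \
  --set token="<registration token>" \
  --set clusterNamespace="<cluster registration namespace>" \
  fleet-agent fleet/fleet-agent
```

Note that the chart's validation template requires the release name `fleet-agent` and the namespace `cattle-fleet-system`.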

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}
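For reference, with `global.cattle.systemDefaultRegistry` unset, `system_default_registry` renders an empty prefix, and a workload that includes the two scheduling helpers (as the agent deployment below does) renders roughly:

```
nodeSelector:
  kubernetes.io/os: linux
tolerations:
  - key: "cattle.io/os"
    value: "linux"
    effect: "NoSchedule"
    operator: "Equal"
```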

@@ -0,0 +1,12 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}"
}
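With hypothetical values `labels: {env: dev}` and `clientID: my-cluster`, the rendered data is roughly:

```
config: |-
  {
    "labels":{"env":"dev"},
    "clientID":"my-cluster"
  }
```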

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-agent
spec:
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
serviceAccountName: fleet-agent
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap
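Since every field is stored base64-encoded via `b64enc`, one way to inspect a bootstrap value after install is to decode it; the secret and field names below come from this template, and the namespace assumes the default install location:

```
kubectl -n cattle-fleet-system get secret fleet-agent-bootstrap \
  -o jsonpath='{.data.apiServerURL}' | base64 -d
```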

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@@ -0,0 +1,63 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.8.1
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded value of the CA of the Kubernetes API server running the Fleet Manager.
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""
# The cluster registration value
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the cluster registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.21.5
debug: false
debugLevel: 0
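As an example of the scheduling knobs above, a sketch values override that pins the agent to dedicated nodes could look like this (the label and taint names are hypothetical):

```
fleetAgent:
  nodeSelector:
    dedicated: fleet
  tolerations:
    - key: dedicated
      operator: "Equal"
      value: fleet
      effect: NoSchedule
```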

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.8.1
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 102.2.1+up0.8.1

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
The Fleet Manager CustomResourceDefinitions Helm chart is a prerequisite for the Fleet Helm chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1,1 @@
# This file is intentionally empty

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.27.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.8.1
dependencies:
- condition: gitops.enabled
name: gitjob
repository: file://./charts/gitjob
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 102.2.1+up0.8.1

@@ -0,0 +1,30 @@
# Fleet Helm Chart
Fleet is GitOps at scale. Fleet is designed to manage multiple clusters.
## What is Fleet?
* Cluster engine: Fleet is a container management and deployment engine designed to offer users more control of the local cluster and constant monitoring through GitOps. Fleet focuses not only on the ability to scale, but also gives users a high degree of control and visibility to monitor exactly what is installed on the cluster.
* Deployment management: Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy all resources in the cluster. As a result, users can enjoy a high degree of control, consistency, and auditability of their clusters.
## Introduction
This chart deploys Fleet on a Kubernetes cluster. It also deploys some of its dependencies as subcharts.
The documentation is centralized in the [doc website](https://fleet.rancher.io/).
## Prerequisites
Get Helm if you don't have it. Helm 3 is just a CLI.
## Install Fleet
Install the Fleet Helm charts (there are two because we separate out CRDs for ultimate flexibility):
```
$ helm repo add fleet https://rancher.github.io/fleet-helm-charts/
$ helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
$ helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```
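One way to verify the install, assuming the default labels and CRD group used by this chart:

```
kubectl -n cattle-fleet-system get pods -l app=fleet-controller
kubectl get crds | grep fleet.cattle.io
```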

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,5 @@
apiVersion: v2
appVersion: 0.1.76-security1
description: Controller that runs jobs based on git events
name: gitjob
version: 0.1.76-security1

@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gitjob
rules:
- apiGroups:
- "batch"
resources:
- 'jobs'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'pods'
verbs:
- 'list'
- 'get'
- 'watch'
- apiGroups:
- ""
resources:
- 'secrets'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'configmaps'
verbs:
- '*'
- apiGroups:
- "gitjob.cattle.io"
resources:
- "gitjobs"
- "gitjobs/status"
verbs:
- "*"

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitjob-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
namespace: {{ .Release.Namespace }}

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitjob
spec:
selector:
matchLabels:
app: "gitjob"
template:
metadata:
labels:
app: "gitjob"
spec:
serviceAccountName: gitjob
containers:
- image: "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
name: gitjob
args:
{{- if .Values.debug }}
- --debug
{{- end }}
- --tekton-image
- "{{ template "system_default_registry" . }}{{ .Values.tekton.repository }}:{{ .Values.tekton.tag }}"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}

@@ -0,0 +1,23 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitjob
rules:
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitjob
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: gitjob
spec:
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: 8080
selector:
app: "gitjob"

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitjob

@@ -0,0 +1,31 @@
gitjob:
repository: rancher/gitjob
tag: v0.1.76-security1
tekton:
repository: rancher/tekton-utils
tag: v0.1.37
global:
cattle:
systemDefaultRegistry: ""
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma-separated list of domains or IP addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
# PriorityClassName assigned to deployment.
priorityClassName: ""
debug: false
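A sketch override enabling the proxy settings above (the proxy host and credentials are hypothetical); the deployment template forwards these to the container as HTTP_PROXY, HTTPS_PROXY, and NO_PROXY:

```
proxy: http://user:password@proxy.example.com:3128
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
```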

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,25 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fleet-controller
data:
config: |
{
"systemDefaultRegistry": "{{ template "system_default_registry" . }}",
"agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
"agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
"apiServerURL": "{{.Values.apiServerURL}}",
"apiServerCA": "{{b64enc .Values.apiServerCA}}",
"agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
"ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
"bootstrap": {
"paths": "{{.Values.bootstrap.paths}}",
"repo": "{{.Values.bootstrap.repo}}",
"secret": "{{.Values.bootstrap.secret}}",
"branch": "{{.Values.bootstrap.branch}}",
"namespace": "{{.Values.bootstrap.namespace}}",
"agentNamespace": "{{.Values.bootstrap.agentNamespace}}",
},
"webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
"githubURLPrefix": "{{.Values.githubURLPrefix}}"
}

@@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-controller
spec:
selector:
matchLabels:
app: fleet-controller
template:
metadata:
labels:
app: fleet-controller
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS
value: {{ quote .Values.propagateDebugSettingsToAgents }}
{{- if .Values.clusterEnqueueDelay }}
- name: FLEET_CLUSTER_ENQUEUE_DELAY
value: {{ .Values.clusterEnqueueDelay }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_DIR
value: /tmp/pprof/
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_PERIOD
value: {{ quote .Values.cpuPprof.period }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
name: fleet-controller
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
command:
- fleetcontroller
{{- if not .Values.gitops.enabled }}
- --disable-gitops
{{- end }}
{{- if not .Values.bootstrap.enabled }}
- --disable-bootstrap
{{- end }}
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
{{- if .Values.cpuPprof }}
- mountPath: /tmp/pprof
name: pprof
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- if .Values.cpuPprof }}
- name: pprof {{ toYaml .Values.cpuPprof.volumeConfiguration | nindent 10 }}
{{- end }}
serviceAccountName: fleet-controller
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,29 @@
{{- if .Values.migrations.clusterRegistrationCleanup }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: fleet-cleanup-clusterregistrations
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: fleet-controller
restartPolicy: Never
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command:
- fleet
args:
- cleanup
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
backoffLimit: 1
{{- end }}

@@ -0,0 +1,114 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller
rules:
- apiGroups:
- gitjob.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- fleet.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: fleet-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller-bootstrap
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
name: fleet-controller-bootstrap
namespace: {{.Release.Namespace}}
{{- end }}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller-bootstrap
{{- end }}

@@ -0,0 +1,83 @@
image:
repository: rancher/fleet
tag: v0.8.1
imagePullPolicy: IfNotPresent
agentImage:
repository: rancher/fleet-agent
tag: v0.8.1
imagePullPolicy: IfNotPresent
# For cluster registration the public URL of the Kubernetes API server must be set here
# Example: https://example.com:6443
apiServerURL: ""
# For cluster registration the PEM-encoded value of the CA of the Kubernetes API server must be set here
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""
# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"
# Whether to allow clusters to specify their labels upon registration.
ignoreClusterRegistrationLabels: false
# Counts from the gitrepo can be out of sync with the bundleDeployment state;
# retry after a number of seconds, as there is no great way to trigger an event that doesn't cause a loop.
# If not set, the default is 15 seconds.
# clusterEnqueueDelay: 120s
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma-separated list of domains or IP addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
bootstrap:
enabled: true
# The namespace that will be auto-created and that the local cluster will be registered in
namespace: fleet-local
# The namespace where the fleet agent for the local cluster will run; if empty
# this will default to cattle-fleet-system
agentNamespace: ""
# A repo to add at install time that will deploy to the local cluster. This allows
# one to fully bootstrap fleet, its configuration and all its downstream clusters
# in one shot.
repo: ""
secret: ""
branch: master
paths: ""
global:
cattle:
systemDefaultRegistry: ""
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
gitops:
enabled: true
debug: false
debugLevel: 0
propagateDebugSettingsToAgents: true
## Optional CPU pprof configuration. Profiles are collected continuously and saved every period
## Any valid volume configuration can be provided, the example below uses hostPath
#cpuPprof:
# period: "60s"
# volumeConfiguration:
# hostPath:
# path: /tmp/pprof
# type: DirectoryOrCreate
migrations:
clusterRegistrationCleanup: true
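As a sketch of the bootstrap flow described above, a values override that deploys a GitRepo to the local cluster at install time could look like this (the repository URL and path are hypothetical):

```
bootstrap:
  enabled: true
  repo: https://github.com/example/fleet-infra
  branch: master
  paths: clusters/local
```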

@@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/release-name: longhorn-crd
apiVersion: v1
appVersion: v1.5.3
description: Installs the CRDs for longhorn.
name: longhorn-crd
type: application
version: 102.3.1+up1.5.3

@@ -0,0 +1,2 @@
# longhorn-crd
A Rancher chart that installs the CRDs used by longhorn.

@@ -0,0 +1,66 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "longhorn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "longhorn.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "longhorn.managerIP" -}}
{{- $fullname := (include "longhorn.fullname" .) -}}
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "secret" }}
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
{{- end }}
{{- /*
longhorn.labels generates the standard Helm labels.
*/ -}}
{{- define "longhorn.labels" -}}
app.kubernetes.io/name: {{ template "longhorn.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.privateRegistry.registryUrl -}}
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
{{- else -}}
{{ include "system_default_registry" . }}
{{- end -}}
{{- end -}}
{{- /*
define the longhorn release namespace
*/ -}}
{{- define "release_namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
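For reference, the `secret` helper above builds a dockerconfigjson payload. With hypothetical values `registryUrl: registry.example.com`, `registryUser: user`, and `registryPasswd: password`, the inner JSON (before the final `b64enc`) is:

```
{"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNzd29yZA=="}}}
```

where the `auth` field is the base64 encoding of `user:password`.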

File diff suppressed because it is too large.

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@@ -0,0 +1,40 @@
annotations:
catalog.cattle.io/auto-install: longhorn-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Longhorn
catalog.cattle.io/kube-version: '>= 1.21.0-0'
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: longhorn
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.5.3
apiVersion: v1
appVersion: v1.5.3
description: Longhorn is a distributed block storage system for Kubernetes.
home: https://github.com/longhorn/longhorn
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
keywords:
- longhorn
- storage
- distributed
- block
- device
- iscsi
- nfs
kubeVersion: '>=1.21.0-0'
maintainers:
- email: maintainers@longhorn.io
name: Longhorn maintainers
name: longhorn
sources:
- https://github.com/longhorn/longhorn
- https://github.com/longhorn/longhorn-engine
- https://github.com/longhorn/longhorn-instance-manager
- https://github.com/longhorn/longhorn-share-manager
- https://github.com/longhorn/longhorn-manager
- https://github.com/longhorn/longhorn-ui
- https://github.com/longhorn/longhorn-tests
- https://github.com/longhorn/backing-image-manager
version: 102.3.1+up1.5.3

@@ -0,0 +1,49 @@
# Longhorn Chart
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
## Source Code
Longhorn is 100% open source software. Project source code is spread across a number of repos:
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
## Prerequisites
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes >= v1.21
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
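A minimal sketch of that in-place upgrade, assuming the chart was installed as release `longhorn` in `longhorn-system` and that a `rancher-charts` repo alias exists (keep existing values and only flip the flag):

```
helm -n longhorn-system upgrade longhorn rancher-charts/longhorn \
  --reuse-values --set enablePSP=false
```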
## Uninstallation
To prevent Longhorn from being accidentally uninstalled (which leads to data loss), we introduce a new setting, deleting-confirmation-flag. If this flag is **false**, the Longhorn uninstallation job will fail. Set this flag to **true** to allow Longhorn uninstallation. You can set this flag using the settings page in the Longhorn UI or `kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag`
To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc).
From the Rancher Cluster Explorer UI, navigate to the Apps page and delete the app `longhorn`, then the app `longhorn-crd`, in the Installed Apps tab.
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

@@ -0,0 +1,27 @@
# Longhorn
Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.

@@ -0,0 +1,890 @@
categories:
- storage
namespace: longhorn-system
questions:
- variable: image.defaultImage
default: "true"
description: "Use default Longhorn images"
label: Use Default Images
type: boolean
show_subquestion_if: false
group: "Longhorn Images"
subquestions:
- variable: image.longhorn.manager.repository
default: rancher/mirrored-longhornio-longhorn-manager
description: "Specify Longhorn Manager Image Repository"
type: string
label: Longhorn Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.manager.tag
default: v1.5.3
description: "Specify Longhorn Manager Image Tag"
type: string
label: Longhorn Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.repository
default: rancher/mirrored-longhornio-longhorn-engine
description: "Specify Longhorn Engine Image Repository"
type: string
label: Longhorn Engine Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.tag
default: v1.5.3
description: "Specify Longhorn Engine Image Tag"
type: string
label: Longhorn Engine Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.repository
default: rancher/mirrored-longhornio-longhorn-ui
description: "Specify Longhorn UI Image Repository"
type: string
label: Longhorn UI Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.tag
default: v1.5.3
description: "Specify Longhorn UI Image Tag"
type: string
label: Longhorn UI Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.repository
default: rancher/mirrored-longhornio-longhorn-instance-manager
description: "Specify Longhorn Instance Manager Image Repository"
type: string
label: Longhorn Instance Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.tag
default: v1.5.3
description: "Specify Longhorn Instance Manager Image Tag"
type: string
label: Longhorn Instance Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.repository
default: rancher/mirrored-longhornio-longhorn-share-manager
description: "Specify Longhorn Share Manager Image Repository"
type: string
label: Longhorn Share Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.tag
default: v1.5.3
description: "Specify Longhorn Share Manager Image Tag"
type: string
label: Longhorn Share Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.repository
default: rancher/mirrored-longhornio-backing-image-manager
description: "Specify Longhorn Backing Image Manager Image Repository"
type: string
label: Longhorn Backing Image Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.tag
default: v1.5.3
description: "Specify Longhorn Backing Image Manager Image Tag"
type: string
label: Longhorn Backing Image Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.repository
default: rancher/mirrored-longhornio-support-bundle-kit
description: "Specify Longhorn Support Bundle Manager Image Repository"
type: string
label: Longhorn Support Bundle Kit Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.tag
default: v0.0.27
description: "Specify Longhorn Support Bundle Manager Image Tag"
type: string
label: Longhorn Support Bundle Kit Image Tag
group: "Longhorn Images Settings"
- variable: image.csi.attacher.repository
default: rancher/mirrored-longhornio-csi-attacher
description: "Specify CSI attacher image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Attacher Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.attacher.tag
default: v4.2.0
description: "Specify CSI attacher image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Attacher Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.repository
default: rancher/mirrored-longhornio-csi-provisioner
description: "Specify CSI provisioner image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Provisioner Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.tag
default: v3.4.1
description: "Specify CSI provisioner image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Provisioner Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.repository
default: rancher/mirrored-longhornio-csi-node-driver-registrar
description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Node Driver Registrar Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.tag
default: v2.7.0
description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Node Driver Registrar Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.repository
default: rancher/mirrored-longhornio-csi-resizer
description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Driver Resizer Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.tag
default: v1.7.0
description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Driver Resizer Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.repository
default: rancher/mirrored-longhornio-csi-snapshotter
description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Driver Snapshotter Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.tag
default: v6.2.1
description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Driver Snapshotter Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.repository
default: rancher/mirrored-longhornio-livenessprobe
description: "Specify CSI liveness probe image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Liveness Probe Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.tag
default: v2.9.0
description: "Specify CSI liveness probe image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Liveness Probe Image Tag
group: "Longhorn CSI Driver Images"
- variable: privateRegistry.registryUrl
label: Private registry URL
description: "URL of private registry. Leave blank to apply system default registry."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.registrySecret
label: Private registry secret name
description: "If create a new private registry secret is true, create a Kubernetes secret with this name; else use the existing secret of this name. Use it to pull images from your private registry."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.createSecret
default: "true"
description: "Create a new private registry secret"
type: boolean
group: "Private Registry Settings"
label: Create Secret for Private Registry Settings
show_subquestion_if: true
subquestions:
- variable: privateRegistry.registryUser
label: Private registry user
description: "User used to authenticate to private registry."
type: string
default: ""
- variable: privateRegistry.registryPasswd
label: Private registry password
description: "Password used to authenticate to private registry."
type: password
default: ""
- variable: longhorn.default_setting
default: "false"
description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
label: "Customize Default Settings"
type: boolean
show_subquestion_if: true
group: "Longhorn Default Settings"
subquestions:
- variable: csi.kubeletRootDir
default:
description: "Specify kubelet root-dir. Leave blank to autodetect."
type: string
label: Kubelet Root Directory
group: "Longhorn CSI Driver Settings"
- variable: csi.attacherReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Specify replica count of CSI Attacher. By default 3."
label: Longhorn CSI Attacher replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.provisionerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Specify replica count of CSI Provisioner. By default 3."
label: Longhorn CSI Provisioner replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.resizerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Specify replica count of CSI Resizer. By default 3."
label: Longhorn CSI Resizer replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.snapshotterReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Specify replica count of CSI Snapshotter. By default 3."
label: Longhorn CSI Snapshotter replica count
group: "Longhorn CSI Driver Settings"
- variable: defaultSettings.backupTarget
label: Backup Target
description: "The endpoint used to access the backupstore. NFS and S3 are supported."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.backupTargetCredentialSecret
label: Backup Target Credential Secret
description: "The name of the Kubernetes secret associated with the backup target."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
label: Allow Recurring Job While Volume Is Detached
description: 'If this setting is enabled, Longhorn automatically attaches the volume and takes a snapshot/backup when it is time for a recurring snapshot/backup.
Note that the volume is not ready for a workload while it is automatically attached. The workload will have to wait until the recurring job finishes.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.createDefaultDiskLabeledNodes
label: Create Default Disk on Labeled Nodes
description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.defaultDataPath
label: Default Data Path
description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
group: "Longhorn Default Settings"
type: string
default: "/var/lib/longhorn/"
- variable: defaultSettings.defaultDataLocality
label: Default Data Locality
description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
The available modes are:
- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: defaultSettings.replicaSoftAntiAffinity
label: Replica Node Level Soft Anti-Affinity
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.replicaAutoBalance
label: Replica Auto Balance
description: 'Enabling this setting automatically rebalances replicas when an available node is discovered.
The available global options are:
- **disabled**. This is the default option. No replica auto-balance will be done.
- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.
Longhorn also supports an individual volume setting. It can be specified in volume.spec.replicaAutoBalance; this overrules the global setting.
The available volume spec options are:
- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
- **disabled**. This option instructs Longhorn that no replica auto-balance should be done.
- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "least-effort"
- "best-effort"
default: "disabled"
- variable: defaultSettings.storageOverProvisioningPercentage
label: Storage Over Provisioning Percentage
description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
group: "Longhorn Default Settings"
type: int
min: 0
default: 200
- variable: defaultSettings.storageMinimalAvailablePercentage
label: Storage Minimal Available Percentage
description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 25
- variable: defaultSettings.storageReservedPercentageForDefaultDisk
label: Storage Reserved Percentage For Default Disk
description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 30
- variable: defaultSettings.upgradeChecker
label: Enable Upgrade Checker
description: 'Upgrade Checker will check for a new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.defaultReplicaCount
label: Default Replica Count
description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
group: "Longhorn Default Settings"
type: int
min: 1
max: 20
default: 3
- variable: defaultSettings.defaultLonghornStaticStorageClass
label: Default Longhorn Static StorageClass Name
description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
group: "Longhorn Default Settings"
type: string
default: "longhorn-static"
- variable: defaultSettings.backupstorePollInterval
label: Backupstore Poll Interval
description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.failedBackupTTL
label: Failed Backup Time to Live
description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.
Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.
Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1440
- variable: defaultSettings.restoreVolumeRecurringJobs
label: Restore Volume Recurring Jobs
description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration.
Longhorn also supports individual volume setting. The setting can be specified on Backup page when making a backup restoration, this overrules the global setting.
The available volume setting options are:
- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
- **enabled**. This option instructs Longhorn to restore recurring jobs/groups from the backup target forcibly.
- **disabled**. This option instructs Longhorn no restoring recurring jobs/groups should be done."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.recurringSuccessfulJobsHistoryLimit
label: Cronjob Successful Jobs History Limit
description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.recurringFailedJobsHistoryLimit
label: Cronjob Failed Jobs History Limit
description: "This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.supportBundleFailedHistoryLimit
label: SupportBundle Failed History Limit
description: "This setting specifies how many failed support bundles can exist in the cluster.
The retained failed support bundle is for analysis purposes and needs to be cleaned up manually.
Set this value to **0** to have Longhorn automatically purge all failed support bundles."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.autoSalvage
label: Automatic salvage
description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.disableSchedulingOnCordonedNode
label: Disable Scheduling On Cordoned Node
description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.replicaZoneSoftAntiAffinity
label: Replica Zone Level Soft Anti-Affinity
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.nodeDownPodDeletionPolicy
label: Pod Deletion Policy When Node is Down
description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
group: "Longhorn Default Settings"
type: enum
options:
- "do-nothing"
- "delete-statefulset-pod"
- "delete-deployment-pod"
- "delete-both-statefulset-and-deployment-pod"
default: "do-nothing"
- variable: defaultSettings.nodeDrainPolicy
label: Node Drain Policy
description: "Define the policy to use when a node with the last healthy replica of a volume is drained.
- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.
- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.
- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining."
group: "Longhorn Default Settings"
type: enum
options:
- "block-if-contains-last-replica"
- "allow-if-replica-is-stopped"
- "always-allow"
default: "block-if-contains-last-replica"
- variable: defaultSettings.replicaReplenishmentWaitInterval
label: Replica Replenishment Wait Interval
description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
group: "Longhorn Default Settings"
type: int
min: 0
default: 600
- variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
label: Concurrent Replica Rebuild Per Node Limit
description: "This setting controls how many replicas on a node can be rebuilt simultaneously.
Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding.
WARNING:
- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
label: Concurrent Volume Backup Restore Per Node Limit
description: "This setting controls how many volumes on a node can restore the backup concurrently.
Longhorn blocks the backup restore once the restoring volume count exceeds the limit.
Set the value to **0** to disable backup restore."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.disableRevisionCounter
label: Disable Revision Counter
description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the replica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.systemManagedPodsImagePullPolicy
label: System Managed Pod Image Pull Policy
description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
group: "Longhorn Default Settings"
type: enum
options:
- "if-not-present"
- "always"
- "never"
default: "if-not-present"
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
label: Allow Volume Creation with Degraded Availability
description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
label: Automatically Cleanup System Generated Snapshot
description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
label: Concurrent Automatic Engine Upgrade Per Node Limit
description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version."
group: "Longhorn Default Settings"
type: int
min: 0
default: 0
- variable: defaultSettings.backingImageCleanupWaitInterval
label: Backing Image Cleanup Wait Interval
description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it."
group: "Longhorn Default Settings"
type: int
min: 0
default: 60
- variable: defaultSettings.backingImageRecoveryWaitInterval
label: Backing Image Recovery Wait Interval
description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown.
WARNING:
- This recovery only works for the backing image of which the creation type is \"download\".
- File state \"unknown\" means the related manager pods on the pod is not running or the node itself is down/disconnected."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.guaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU
description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each instance manager pod on this node. This will help maintain engine and replica stability during high node workload.
In order to prevent unexpected volume instance (engine/replica) crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
`Guaranteed Instance Manager CPU = The estimated max Longhorn volume engine and replica count on a node * 0.1 / The total allocatable CPUs on the node * 100`
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
WARNING:
- Value 0 means unsetting CPU requests for instance manager pods.
- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40.
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
- This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
- After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
group: "Longhorn Default Settings"
type: int
min: 0
max: 40
default: 12
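# Hedged worked example of the formula above (hypothetical numbers, not taken
# from this chart): on a node with 16 allocatable CPUs and an estimated maximum
# of 20 Longhorn volume engines and replicas, the formula gives
#   20 * 0.1 / 16 * 100 = 12.5
# so a value of 12 or 13 would be a reasonable starting point before tuning
# under real workloads.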
- variable: defaultSettings.logLevel
label: Log Level
description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. By default Debug."
group: "Longhorn Default Settings"
type: string
default: "Info"
- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
label: Kubernetes Cluster Autoscaler Enabled (Experimental)
description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that met all conditions:
- No volume attached to the node.
- Is not the last node containing the replica of any volume.
- Is not running backing image components pod.
- Is not running share manager components pod."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.orphanAutoDeletion
label: Orphaned Data Cleanup
description: "This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically like stale replicas. Orphan resources on down or unknown nodes will not be cleaned up automatically."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.storageNetwork
label: Storage Network
description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in \"<namespace>/<name>\" format.
WARNING:
- The cluster must have Multus installed beforehand, and the NetworkAttachmentDefinition IPs must be reachable between nodes.
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes.
- When applying the setting, Longhorn will restart all manager, instance-manager, and backing-image-manager pods."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.deletingConfirmationFlag
label: Deleting Confirmation Flag
description: "This flag is designed to prevent Longhorn from being accidentally uninstalled which will lead to data lost.
Set this flag to **true** to allow Longhorn uninstallation.
If this flag **false**, Longhorn uninstallation job will fail. "
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.engineReplicaTimeout
label: Timeout between Engine and Replica
description: "In seconds. The setting specifies the timeout between the engine and replica(s), and the value should be between 8 to 30 seconds. The default value is 8 seconds."
group: "Longhorn Default Settings"
type: int
default: "8"
- variable: defaultSettings.snapshotDataIntegrity
label: Snapshot Data Integrity
description: "This setting allows users to enable or disable snapshot hashing and data integrity checking.
Available options are
- **disabled**: Disable snapshot disk file hashing and data integrity checking.
- **enabled**: Enable periodic snapshot disk file hashing and data integrity checking. To detect filesystem-unaware corruption caused by bit rot or other issues in snapshot disk files, the Longhorn system periodically hashes files and finds corrupted ones. Hence, system performance is impacted during the periodic checking.
- **fast-check**: Enable snapshot disk file hashing and fast data integrity checking. The Longhorn system only hashes snapshot disk files if they have not been hashed yet or their modification time has changed. In this mode, filesystem-unaware corruption cannot be detected, but the impact on system performance can be minimized."
group: "Longhorn Default Settings"
type: string
default: "disabled"
- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation
label: Immediate Snapshot Data Integrity Check After Creating a Snapshot
description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.snapshotDataIntegrityCronjob
label: Snapshot Data Integrity Check CronJob
description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
Warning: Hashing snapshot disk files impacts the performance of the system. It is recommended to run data integrity checks during off-peak times and to reduce the frequency of checks."
group: "Longhorn Default Settings"
type: string
default: "0 0 */7 * *"
- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
label: Remove Snapshots During Filesystem Trim
description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children.\n\n
Since Longhorn filesystem trim feature can be applied to the volume head and the followed continuous removed or system snapshots only.\n\n
Notice that trying to trim a removed files from a valid snapshot will do nothing but the filesystem will discard this kind of in-memory trimmable file info.\n\n
Later on if you mark the snapshot as removed and want to retry the trim, you may need to unmount and remount the filesystem so that the filesystem can recollect the trimmable file info."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.fastReplicaRebuildEnabled
label: Fast Replica Rebuild Enabled
description: "This feature supports the fast replica rebuilding. It relies on the checksum of snapshot disk files, so setting the snapshot-data-integrity to **enable** or **fast-check** is a prerequisite."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.replicaFileSyncHttpClientTimeout
label: Timeout of HTTP Client to Replica File Sync Server
description: "In seconds. The setting specifies the HTTP client timeout to the file sync server."
group: "Longhorn Default Settings"
type: int
default: "30"
- variable: defaultSettings.backupCompressionMethod
label: Backup Compression Method
description: "This setting allows users to specify backup compression method.
Available options are
- **none**: Disable the compression method. Suitable for multimedia data such as encoded images and videos.
- **lz4**: Fast compression method. Suitable for flat files.
- **gzip**: A bit of higher compression ratio but relatively slow."
group: "Longhorn Default Settings"
type: string
default: "lz4"
- variable: defaultSettings.backupConcurrentLimit
label: Backup Concurrent Limit Per Backup
description: "This setting controls how many worker threads per backup concurrently."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.restoreConcurrentLimit
label: Restore Concurrent Limit Per Backup
description: "This setting controls how many worker threads per restore concurrently."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.v2DataEngine
label: V2 Data Engine
description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment.
WARNING:
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
- When applying the setting, Longhorn will restart all instance-manager pods.
- When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
type: boolean
default: false
- variable: defaultSettings.offlineReplicaRebuilding
label: Offline Replica Rebuilding
description: "This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
required: true
type: enum
options:
- "enabled"
- "disabled"
default: "enabled"
- variable: persistence.defaultClass
default: "true"
description: "Set as default StorageClass for Longhorn"
label: Default Storage Class
group: "Longhorn Storage Class Settings"
required: true
type: boolean
- variable: persistence.reclaimPolicy
label: Storage Class Retain Policy
description: "Define reclaim policy (Retain or Delete)"
group: "Longhorn Storage Class Settings"
required: true
type: enum
options:
- "Delete"
- "Retain"
default: "Delete"
- variable: persistence.defaultClassReplicaCount
description: "Set replica count for Longhorn StorageClass"
label: Default Storage Class Replica Count
group: "Longhorn Storage Class Settings"
type: int
min: 1
max: 10
default: 3
- variable: persistence.defaultDataLocality
description: "Set data locality for Longhorn StorageClass"
label: Default Storage Class Data Locality
group: "Longhorn Storage Class Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: persistence.recurringJobSelector.enable
description: "Enable recurring job selector for Longhorn StorageClass"
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Recurring Job Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.recurringJobSelector.jobList
description: 'Recurring job selector list for the Longhorn StorageClass. Be careful with the quoting of the input, e.g., [{"name":"backup", "isGroup":true}]'
label: Storage Class Recurring Job Selector List
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.defaultNodeSelector.enable
description: "Enable Node selector for Longhorn StorageClass"
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Node Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.defaultNodeSelector.selector
label: Storage Class Node Selector
description: 'Use a node selector when you want to bind PVCs created via this StorageClass to the desired mountpoint on nodes tagged with the given value'
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.enable
description: "Set backing image for Longhorn StorageClass"
group: "Longhorn Storage Class Settings"
label: Default Storage Class Backing Image
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.backingImage.name
description: 'Specify a backing image that will be used by Longhorn volumes in the Longhorn StorageClass. If it does not exist, the backing image data source type and data source parameters should be specified so that Longhorn can create the backing image before using it.'
label: Storage Class Backing Image Name
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.expectedChecksum
description: 'Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- It is not recommended to set this field if the data source type is \"export-from-volume\".'
label: Storage Class Backing Image Expected SHA512 Checksum
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.dataSourceType
description: 'Specify the data source type for the backing image used in the Longhorn StorageClass.
If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- For backing image creation with data source type \"upload\", it is recommended to use the UI rather than this StorageClass. Uploading requires sending the file data to the Longhorn backend after the object is created, which is complicated to handle manually.'
label: Storage Class Backing Image Data Source Type
group: "Longhorn Storage Class Settings"
type: enum
options:
- ""
- "download"
- "upload"
- "export-from-volume"
default: ""
- variable: persistence.backingImage.dataSourceParameters
description: "Specify the data source parameters for the backing image used in Longhorn StorageClass.
If the backing image does not exists, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
This option accepts a json string of a map. e.g., '{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- Be careful of the quotes here."
label: Storage Class Backing Image Data Source Parameters
group: "Longhorn Storage Class Settings"
type: string
default:
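# A hedged values.yaml sketch for a StorageClass backing image created by
# download; the URL is the placeholder from the description above and the
# image name is hypothetical, not a real image:
#   persistence:
#     backingImage:
#       enable: true
#       name: test-backing-image
#       dataSourceType: download
#       dataSourceParameters: '{"url":"https://backing-image-example.s3-region.amazonaws.com/test-backing-image"}'
#       expectedChecksum: ""  # optional SHA512; leave empty to skip verification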
- variable: persistence.removeSnapshotsDuringFilesystemTrim
description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass"
label: Default Storage Class Remove Snapshots During Filesystem Trim
group: "Longhorn Storage Class Settings"
type: enum
options:
- "ignored"
- "enabled"
- "disabled"
default: "ignored"
- variable: ingress.enabled
default: "false"
description: "Expose app using Layer 7 Load Balancer - ingress"
type: boolean
group: "Services and Load Balancing"
label: Expose app using Layer 7 Load Balancer
show_subquestion_if: true
subquestions:
- variable: ingress.host
default: "xip.io"
description: "layer 7 Load Balancer hostname"
type: hostname
required: true
label: Layer 7 Load Balancer Hostname
- variable: ingress.path
default: "/"
description: "If ingress is enabled you can set the default ingress path"
type: string
required: true
label: Ingress Path
- variable: service.ui.type
default: "Rancher-Proxy"
description: "Define Longhorn UI service type"
type: enum
options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"
- "Rancher-Proxy"
label: Longhorn UI Service
show_if: "ingress.enabled=false"
group: "Services and Load Balancing"
show_subquestion_if: "NodePort"
subquestions:
- variable: service.ui.nodePort
default: ""
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
type: int
min: 30000
max: 32767
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
label: UI Service NodePort number
- variable: enablePSP
default: "false"
description: "Setup a pod security policy for Longhorn workloads."
label: Pod Security Policy
type: boolean
group: "Other Settings"
- variable: global.cattle.windowsCluster.enabled
default: "false"
description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster."
label: Rancher Windows Cluster
type: boolean
group: "Other Settings"
- variable: networkPolicies.enabled
description: "Enable NetworkPolicies to limit access to the longhorn pods.
Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added."
group: "Other Settings"
label: Network Policies
default: "false"
type: boolean
subquestions:
- variable: networkPolicies.type
label: Network Policies for Ingress
description: "Create the policy to allow access for the ingress, select the distribution."
show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
type: enum
default: "rke2"
options:
- "rke1"
- "rke2"
- "k3s"

View File

@ -0,0 +1,5 @@
Longhorn is now installed on the cluster!
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
Visit our documentation at https://longhorn.io/docs/

View File

@ -0,0 +1,66 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "longhorn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "longhorn.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "longhorn.managerIP" -}}
{{- $fullname := (include "longhorn.fullname" .) -}}
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "secret" }}
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
{{- end }}
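{{- /*
Example for the "secret" helper above (hypothetical registry values, for
illustration only): with privateRegistry.registryUrl=registry.example.com,
registryUser=admin and registryPasswd=pass, "secret" renders the base64
encoding of
  {"auths": {"registry.example.com": {"auth": "YWRtaW46cGFzcw=="}}}
where "YWRtaW46cGFzcw==" is base64("admin:pass").
*/ -}}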
{{- /*
longhorn.labels generates the standard Helm labels.
*/ -}}
{{- define "longhorn.labels" -}}
app.kubernetes.io/name: {{ template "longhorn.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.privateRegistry.registryUrl -}}
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
{{- else -}}
{{ include "system_default_registry" . }}
{{- end -}}
{{- end -}}
{{- /*
define the longhorn release namespace
*/ -}}
{{- define "release_namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
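{{- /*
Usage sketch for "registry_url" (hypothetical values): with
privateRegistry.registryUrl set to "registry.example.com", a template line
such as
  image: {{ template "registry_url" . }}longhornio/longhorn-manager:v1.5.1
would render as
  image: registry.example.com/longhornio/longhorn-manager:v1.5.1
The repository and tag shown are illustrative, not taken from this chart; when
registryUrl is unset, the helper falls back to system_default_registry.
*/ -}}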

View File

@ -0,0 +1,61 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: longhorn-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- "*"
- apiGroups: [""]
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"]
verbs: ["*"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: ["apps"]
resources: ["daemonsets", "statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets", "podsecuritypolicies"]
verbs: ["*"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
verbs: ["*"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
verbs: ["*"]
- apiGroups: ["longhorn.io"]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["*"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
verbs: ["get", "list", "create", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"]
verbs: ["*"]

View File

@ -0,0 +1,27 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-bind
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: longhorn-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-support-bundle
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}

View File

@ -0,0 +1,151 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-manager
namespace: {{ include "release_namespace" . }}
spec:
selector:
matchLabels:
app: longhorn-manager
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-manager
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
containers:
- name: longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true
command:
- longhorn-manager
- -d
{{- if eq .Values.longhornManager.log.format "json" }}
- -j
{{- end }}
- daemon
- --engine-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
- --instance-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
- --share-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
- --backing-image-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
- --support-bundle-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}"
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --service-account
- longhorn-service-account
ports:
- containerPort: 9500
name: manager
- containerPort: 9501
name: conversion-wh
- containerPort: 9502
name: admission-wh
- containerPort: 9503
name: recov-backend
readinessProbe:
httpGet:
path: /v1/healthz
port: 9501
scheme: HTTPS
volumeMounts:
- name: dev
mountPath: /host/dev/
- name: proc
mountPath: /host/proc/
- name: longhorn
mountPath: /var/lib/longhorn/
mountPropagation: Bidirectional
- name: longhorn-grpc-tls
mountPath: /tls-files/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumes:
- name: dev
hostPath:
path: /dev/
- name: proc
hostPath:
path: /proc/
- name: longhorn
hostPath:
path: /var/lib/longhorn/
- name: longhorn-grpc-tls
secret:
secretName: longhorn-grpc-tls
optional: true
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-backend
namespace: {{ include "release_namespace" . }}
{{- if .Values.longhornManager.serviceAnnotations }}
annotations:
{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.manager.type }}
sessionAffinity: ClientIP
selector:
app: longhorn-manager
ports:
- name: manager
port: 9500
targetPort: manager
{{- if .Values.service.manager.nodePort }}
nodePort: {{ .Values.service.manager.nodePort }}
{{- end }}

View File

@ -0,0 +1,83 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
default-setting.yaml: |-
{{ if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }}backup-target: {{ .Values.defaultSettings.backupTarget }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }}backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }}allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }}create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }}default-data-path: {{ .Values.defaultSettings.defaultDataPath }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }}replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }}replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }}storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }}storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }}storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }}upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }}default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }}restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }}recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }}recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }}support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }}{{ end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
{{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
{{- end -}}
{{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
{{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
{{- end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}priority-class: {{ .Values.defaultSettings.priorityClass }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}auto-salvage: {{ .Values.defaultSettings.autoSalvage }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }}disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }}node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }}concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }}concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }}disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }}system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }}allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }}auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }}concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }}backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }}backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.guaranteedInstanceManagerCPU) }}guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }}kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }}orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }}storage-network: {{ .Values.defaultSettings.storageNetwork }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }}deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }}engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }}snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }}snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }}snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }}remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }}fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }}replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.logLevel) }}log-level: {{ .Values.defaultSettings.logLevel }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }}backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }}backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }}restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }}v2-data-engine: {{ .Values.defaultSettings.v2DataEngine }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.offlineReplicaRebuilding) }}offline-replica-rebuilding: {{ .Values.defaultSettings.offlineReplicaRebuilding }}{{ end }}
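{{- /*
Rendered sketch (hypothetical values, for illustration): if a values file sets
only defaultSettings.backupTarget="s3://backupbucket@us-east-1/",
defaultSettings.createDefaultDiskLabeledNodes=true and
defaultSettings.guaranteedInstanceManagerCPU=12, the data above renders as:
  default-setting.yaml: |-
    backup-target: s3://backupbucket@us-east-1/
    create-default-disk-labeled-nodes: true
    guaranteed-instance-manager-cpu: 12
All other keys are omitted because of the kindIs "invalid" guards on unset values.
*/ -}}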

View File

@ -0,0 +1,118 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: longhorn-driver-deployer
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
app: longhorn-driver-deployer
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-driver-deployer
spec:
initContainers:
- name: wait-longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-driver-deployer
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- -d
- deploy-driver
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --manager-url
- http://longhorn-backend:9500/v1
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
{{- if .Values.csi.kubeletRootDir }}
- name: KUBELET_ROOT_DIR
value: {{ .Values.csi.kubeletRootDir }}
{{- end }}
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
- name: CSI_ATTACHER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
{{- end }}
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
- name: CSI_PROVISIONER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
{{- end }}
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
{{- end }}
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
- name: CSI_RESIZER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
{{- end }}
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
- name: CSI_SNAPSHOTTER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
{{- end }}
{{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
- name: CSI_LIVENESS_PROBE_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
{{- end }}
{{- if .Values.csi.attacherReplicaCount }}
- name: CSI_ATTACHER_REPLICA_COUNT
value: {{ .Values.csi.attacherReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.provisionerReplicaCount }}
- name: CSI_PROVISIONER_REPLICA_COUNT
value: {{ .Values.csi.provisionerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.resizerReplicaCount }}
- name: CSI_RESIZER_REPLICA_COUNT
value: {{ .Values.csi.resizerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.snapshotterReplicaCount }}
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
{{- end }}
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
securityContext:
runAsUser: 0

View File

@ -0,0 +1,114 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
spec:
replicas: {{ .Values.longhornUI.replicas }}
selector:
matchLabels:
app: longhorn-ui
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-ui
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- longhorn-ui
topologyKey: kubernetes.io/hostname
containers:
- name: longhorn-ui
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name : nginx-cache
mountPath: /var/cache/nginx/
- name : nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
ports:
- containerPort: 8000
name: http
env:
- name: LONGHORN_MANAGER_IP
value: "http://longhorn-backend:9500"
- name: LONGHORN_UI_PORT
value: "8000"
volumes:
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornUI.priorityClass }}
priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornUI.tolerations }}
{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornUI.nodeSelector }}
{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
---
kind: Service
apiVersion: v1
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
kubernetes.io/cluster-service: "true"
{{- end }}
name: longhorn-frontend
namespace: {{ include "release_namespace" . }}
spec:
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
type: ClusterIP
{{- else }}
type: {{ .Values.service.ui.type }}
{{- end }}
{{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
selector:
app: longhorn-ui
ports:
- name: http
port: 80
targetPort: http
{{- if .Values.service.ui.nodePort }}
nodePort: {{ .Values.service.ui.nodePort }}
{{- else }}
nodePort: null
{{- end }}
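{{- /*
Values sketch (hypothetical, non-default): to expose the UI service above via
a NodePort instead of the Rancher proxy, a values file could set:
  service:
    ui:
      type: NodePort
      nodePort: 30080
30080 is only an example within the 30000-32767 range allowed by questions.yaml.
*/ -}}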

View File

@ -0,0 +1,48 @@
{{- if .Values.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else -}}
apiVersion: networking.k8s.io/v1beta1
{{- end }}
kind: Ingress
metadata:
name: longhorn-ingress
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ingress
annotations:
{{- if .Values.ingress.secureBackends }}
ingress.kubernetes.io/secure-backends: "true"
{{- end }}
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ default "" .Values.ingress.path }}
{{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: ImplementationSpecific
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: longhorn-frontend
port:
number: 80
{{- else }}
serviceName: longhorn-frontend
servicePort: 80
{{- end }}
{{- if .Values.ingress.tls }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Values.ingress.tlsSecret }}
{{- end }}
{{- end }}
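{{- /*
Values sketch for this Ingress template (hostname, class, and secret name are
placeholders; the TLS secret is assumed to exist already):
  ingress:
    enabled: true
    ingressClassName: nginx
    host: longhorn.example.com
    path: /
    tls: true
    tlsSecret: longhorn-tls
*/ -}}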

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-data-source
namespace: longhorn-system
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-manager
namespace: longhorn-system
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: instance-manager
namespace: longhorn-system
spec:
podSelector:
matchLabels:
longhorn.io/component: instance-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-manager
namespace: longhorn-system
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
app: longhorn-ui
- podSelector:
matchLabels:
app: longhorn-csi-plugin
- podSelector:
matchLabels:
longhorn.io/managed-by: longhorn-manager
matchExpressions:
- { key: recurring-job.longhorn.io, operator: Exists }
- podSelector:
matchExpressions:
- { key: longhorn.io/job-task, operator: Exists }
- podSelector:
matchLabels:
app: longhorn-driver-deployer
{{- end }}

@ -0,0 +1,17 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-recovery-backend
namespace: longhorn-system
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9503
{{- end }}

@ -0,0 +1,46 @@
{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-ui-frontend
namespace: longhorn-system
spec:
podSelector:
matchLabels:
app: longhorn-ui
policyTypes:
- Ingress
ingress:
- from:
{{- if eq .Values.networkPolicies.type "rke1"}}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: ingress-nginx
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
{{- else if eq .Values.networkPolicies.type "rke2" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: rke2-ingress-nginx
app.kubernetes.io/name: rke2-ingress-nginx
{{- else if eq .Values.networkPolicies.type "k3s" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ports:
- port: 8000
protocol: TCP
- port: 80
protocol: TCP
{{- end }}
{{- end }}

@ -0,0 +1,33 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-conversion-webhook
namespace: longhorn-system
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9501
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-admission-webhook
namespace: longhorn-system
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9502
{{- end }}

@ -0,0 +1,56 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
name: longhorn-post-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-post-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-post-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- post-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

@ -0,0 +1,58 @@
{{- if .Values.helmPreUpgradeCheckerJob.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
name: longhorn-pre-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-pre-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-pre-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- pre-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
{{- end }}

@ -0,0 +1,66 @@
{{- if .Values.enablePSP }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: longhorn-psp
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
privileged: true
allowPrivilegeEscalation: true
requiredDropCapabilities:
- NET_RAW
allowedCapabilities:
- SYS_ADMIN
hostNetwork: false
hostIPC: false
hostPID: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
fsGroup:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- secret
- projected
- hostPath
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: longhorn-psp-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- longhorn-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: longhorn-psp-binding
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: longhorn-psp-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: default
namespace: {{ include "release_namespace" . }}
{{- end }}

@ -0,0 +1,13 @@
{{- if .Values.privateRegistry.createSecret }}
{{- if .Values.privateRegistry.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.privateRegistry.registrySecret }}
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ template "secret" . }}
{{- end }}
{{- end }}

@ -0,0 +1,21 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}

@ -0,0 +1,74 @@
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-conversion-webhook
name: longhorn-conversion-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
sessionAffinity: ClientIP
selector:
app: longhorn-manager
ports:
- name: conversion-webhook
port: 9501
targetPort: conversion-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-admission-webhook
name: longhorn-admission-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
sessionAffinity: ClientIP
selector:
app: longhorn-manager
ports:
- name: admission-webhook
port: 9502
targetPort: admission-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-recovery-backend
name: longhorn-recovery-backend
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
sessionAffinity: ClientIP
selector:
app: longhorn-manager
ports:
- name: recovery-backend
port: 9503
targetPort: recov-backend
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-engine-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: engine
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-replica-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: replica

@ -0,0 +1,44 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-storageclass
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
storageclass.yaml: |
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn
annotations:
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
provisioner: driver.longhorn.io
allowVolumeExpansion: true
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
staleReplicaTimeout: "30"
fromBackup: ""
{{- if .Values.persistence.defaultFsType }}
fsType: "{{ .Values.persistence.defaultFsType }}"
{{- end }}
{{- if .Values.persistence.defaultMkfsParams }}
mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}"
{{- end }}
{{- if .Values.persistence.migratable }}
migratable: "{{ .Values.persistence.migratable }}"
{{- end }}
{{- if .Values.persistence.backingImage.enable }}
backingImage: {{ .Values.persistence.backingImage.name }}
backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
{{- end }}
{{- if .Values.persistence.recurringJobSelector.enable }}
recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
{{- end }}
dataLocality: {{ .Values.persistence.defaultDataLocality | quote }}
{{- if .Values.persistence.defaultNodeSelector.enable }}
nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}"
{{- end }}

@ -0,0 +1,16 @@
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
namespace: {{ include "release_namespace" $ }}
labels: {{- include "longhorn.labels" $ | nindent 4 }}
app: longhorn
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}

@ -0,0 +1,57 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
name: longhorn-uninstall
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-uninstall
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-uninstall
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- uninstall
- --force
env:
- name: LONGHORN_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: Never
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
      {{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

@ -0,0 +1,53 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-admin"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "get", "list", "watch" ]

@ -0,0 +1,34 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageDataSource" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Backup" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupTarget" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupVolume" false -}}
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
# {{- set $found "longhorn.io/v1beta2/Orphan" false -}}
# {{- set $found "longhorn.io/v1beta1/RecurringJob" false -}}
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
# {{- set $found "longhorn.io/v1beta2/Snapshot" false -}}
# {{- set $found "longhorn.io/v1beta2/SupportBundle" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemBackup" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemRestore" false -}}
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
# {{- set $found "longhorn.io/v1beta2/VolumeAttachment" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.enablePSP }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

@ -0,0 +1,296 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
systemDefaultRegistry: ""
windowsCluster:
    # Enable this to allow Longhorn to run on a Rancher-deployed Windows cluster
enabled: false
# Tolerate Linux node taint
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# Select Linux nodes
nodeSelector:
kubernetes.io/os: "linux"
    # Apply the toleration and node selector to the components Longhorn creates at run time
defaultSetting:
taintToleration: cattle.io/os=linux:NoSchedule
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
networkPolicies:
enabled: false
  # Available types: k3s, rke2, rke1 (selects which ingress controller may reach the Longhorn UI)
type: "k3s"
image:
longhorn:
engine:
repository: rancher/mirrored-longhornio-longhorn-engine
tag: v1.5.3
manager:
repository: rancher/mirrored-longhornio-longhorn-manager
tag: v1.5.3
ui:
repository: rancher/mirrored-longhornio-longhorn-ui
tag: v1.5.3
instanceManager:
repository: rancher/mirrored-longhornio-longhorn-instance-manager
tag: v1.5.3
shareManager:
repository: rancher/mirrored-longhornio-longhorn-share-manager
tag: v1.5.3
backingImageManager:
repository: rancher/mirrored-longhornio-backing-image-manager
tag: v1.5.3
supportBundleKit:
repository: rancher/mirrored-longhornio-support-bundle-kit
tag: v0.0.27
csi:
attacher:
repository: rancher/mirrored-longhornio-csi-attacher
tag: v4.2.0
provisioner:
repository: rancher/mirrored-longhornio-csi-provisioner
tag: v3.4.1
nodeDriverRegistrar:
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
tag: v2.7.0
resizer:
repository: rancher/mirrored-longhornio-csi-resizer
tag: v1.7.0
snapshotter:
repository: rancher/mirrored-longhornio-csi-snapshotter
tag: v6.2.1
livenessProbe:
repository: rancher/mirrored-longhornio-livenessprobe
tag: v2.9.0
pullPolicy: IfNotPresent
service:
ui:
type: ClusterIP
nodePort: null
manager:
type: ClusterIP
nodePort: ""
loadBalancerIP: ""
loadBalancerSourceRanges: ""
persistence:
defaultClass: true
defaultFsType: ext4
defaultMkfsParams: ""
defaultClassReplicaCount: 3
  defaultDataLocality: disabled # or "best-effort"
reclaimPolicy: Delete
migratable: false
recurringJobSelector:
enable: false
jobList: []
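    ## Illustrative example only (the job name is hypothetical); keep the value a quoted
    ## string so it renders correctly into the StorageClass, e.g.:
    # jobList: '[{"name":"backup", "isGroup":true}]'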
backingImage:
enable: false
name: ~
dataSourceType: ~
dataSourceParameters: ~
expectedChecksum: ~
defaultNodeSelector:
    enable: false # disabled by default
selector: ""
  removeSnapshotsDuringFilesystemTrim: ignored # or "enabled" / "disabled"
helmPreUpgradeCheckerJob:
enabled: true
csi:
kubeletRootDir: ~
attacherReplicaCount: ~
provisionerReplicaCount: ~
resizerReplicaCount: ~
snapshotterReplicaCount: ~
defaultSettings:
backupTarget: ~
backupTargetCredentialSecret: ~
allowRecurringJobWhileVolumeDetached: ~
createDefaultDiskLabeledNodes: ~
defaultDataPath: ~
defaultDataLocality: ~
replicaSoftAntiAffinity: ~
replicaAutoBalance: ~
storageOverProvisioningPercentage: ~
storageMinimalAvailablePercentage: ~
storageReservedPercentageForDefaultDisk: ~
upgradeChecker: ~
defaultReplicaCount: ~
defaultLonghornStaticStorageClass: ~
backupstorePollInterval: ~
failedBackupTTL: ~
restoreVolumeRecurringJobs: ~
recurringSuccessfulJobsHistoryLimit: ~
recurringFailedJobsHistoryLimit: ~
supportBundleFailedHistoryLimit: ~
taintToleration: ~
systemManagedComponentsNodeSelector: ~
priorityClass: ~
autoSalvage: ~
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
disableSchedulingOnCordonedNode: ~
replicaZoneSoftAntiAffinity: ~
nodeDownPodDeletionPolicy: ~
nodeDrainPolicy: ~
replicaReplenishmentWaitInterval: ~
concurrentReplicaRebuildPerNodeLimit: ~
concurrentVolumeBackupRestorePerNodeLimit: ~
disableRevisionCounter: ~
systemManagedPodsImagePullPolicy: ~
allowVolumeCreationWithDegradedAvailability: ~
autoCleanupSystemGeneratedSnapshot: ~
concurrentAutomaticEngineUpgradePerNodeLimit: ~
backingImageCleanupWaitInterval: ~
backingImageRecoveryWaitInterval: ~
guaranteedInstanceManagerCPU: ~
kubernetesClusterAutoscalerEnabled: ~
orphanAutoDeletion: ~
storageNetwork: ~
deletingConfirmationFlag: ~
engineReplicaTimeout: ~
snapshotDataIntegrity: ~
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
snapshotDataIntegrityCronjob: ~
removeSnapshotsDuringFilesystemTrim: ~
fastReplicaRebuildEnabled: ~
replicaFileSyncHttpClientTimeout: ~
logLevel: ~
backupCompressionMethod: ~
backupConcurrentLimit: ~
restoreConcurrentLimit: ~
v2DataEngine: ~
offlineReplicaRebuilding: ~
privateRegistry:
createSecret: ~
registryUrl: ~
registryUser: ~
registryPasswd: ~
registrySecret: ~
longhornManager:
log:
## Allowed values are `plain` or `json`.
format: plain
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
# annotation-key1: "annotation-value1"
# annotation-key2: "annotation-value2"
longhornDriver:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
replicas: 2
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
ingress:
## Set to true to enable ingress record generation
enabled: false
## Add ingressClassName to the Ingress
## Can replace the kubernetes.io/ingress.class annotation on v1.18+
ingressClassName: ~
host: sslip.io
## Set this to true in order to enable TLS on the ingress record
tls: false
  ## Enable this so that the backend service is connected over port 443
secureBackends: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: longhorn.local-tls
## If ingress is enabled you can set the default ingress path
## then you can access the UI by using the following full path {{host}}+{{path}}
path: /
## Ingress annotations done as key:value pairs
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
  ## If secureBackends is set to true, the annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true
secrets:
  ## If you're providing your own certificates, please use this to add the certificates as secrets.
  ## The certificate should start with -----BEGIN CERTIFICATE----- and
  ## the key with -----BEGIN RSA PRIVATE KEY-----.
##
## name should line up with a tlsSecret set further up
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: longhorn.local-tls
# key:
# certificate:
# For Kubernetes < v1.25, if your cluster enables the Pod Security Policy admission controller,
# set this to `true` to ship longhorn-psp, which allows privileged Longhorn pods to start
enablePSP: false
## Specify an override namespace; this is useful when using Longhorn as a sub-chart
## and its release namespace is not `longhorn-system`
namespaceOverride: ""
# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
annotations: {}
serviceAccount:
# Annotations to add to the service account
annotations: {}
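## Illustrative example only (not part of the chart defaults): a minimal override
## that exposes the Longhorn UI through an Ingress with TLS. The host and secret
## name below are hypothetical; adjust them to your environment.
# ingress:
#   enabled: true
#   ingressClassName: nginx
#   host: longhorn.example.com
#   tls: true
#   tlsSecret: longhorn-example-tls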

@ -0,0 +1,16 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/release-name: neuvector-crd
apiVersion: v1
appVersion: 5.2.4
description: Helm chart for NeuVector's CRD services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector-crd
type: application
version: 102.0.6+up2.6.6

@ -0,0 +1,14 @@
# NeuVector Helm Chart
Helm chart for NeuVector container security's CRD services. NeuVector's CRDs (Custom Resource Definitions) capture and declare application security policies early in the pipeline; the defined policies can then be deployed together with the container applications.
Because the CRD policies can be deployed before NeuVector's core product, this separate Helm chart was created. For backward compatibility, crd.yaml is not removed from the 'core' chart. If you use this 'crd' chart, please set `crdwebhook.enabled` to false in the 'core' chart. An illustrative custom resource is shown after the table below.
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`openshift` | If deploying in OpenShift, set this to true | `false` |
`crdwebhook.type` | crd webhook type | `ClusterIP` |
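For reference, a policy declared through these CRDs is an ordinary custom resource. The following is a minimal illustrative `NvSecurityRule` sketched from the schema this chart installs; every name and value in it is hypothetical:

```yaml
apiVersion: neuvector.com/v1
kind: NvSecurityRule
metadata:
  name: demo-app-policy       # hypothetical
  namespace: demo             # hypothetical
spec:
  target:
    policymode: Monitor       # Discover, Monitor, Protect, or N/A
    selector:
      name: nv.demo-app.demo  # hypothetical NeuVector group
      criteria:
        - key: service
          op: "="
          value: demo-app.demo
  ingress:
    - action: allow
      name: allow-frontend
      applications:
        - HTTP
      ports: tcp/8080
      selector:
        name: nv.frontend.demo
        criteria:
          - key: service
            op: "="
            value: frontend.demo
```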

@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

@ -0,0 +1,845 @@
{{- if .Values.crdwebhook.enabled -}}
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apiextensions.k8s.io/v1
{{- else }}
apiVersion: apiextensions.k8s.io/v1beta1
{{- end }}
kind: CustomResourceDefinition
metadata:
name: nvsecurityrules.neuvector.com
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
group: neuvector.com
names:
kind: NvSecurityRule
listKind: NvSecurityRuleList
plural: nvsecurityrules
singular: nvsecurityrule
scope: Namespaced
{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
version: v1
{{- end }}
versions:
- name: v1
served: true
storage: true
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
schema:
openAPIV3Schema:
properties:
spec:
properties:
egress:
items:
properties:
action:
enum:
- allow
- deny
type: string
applications:
items:
type: string
type: array
name:
type: string
ports:
type: string
priority:
type: integer
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- action
- name
- selector
type: object
type: array
file:
items:
properties:
app:
items:
type: string
type: array
behavior:
enum:
- monitor_change
- block_access
type: string
filter:
type: string
recursive:
type: boolean
required:
- behavior
- filter
type: object
type: array
ingress:
items:
properties:
action:
enum:
- allow
- deny
type: string
applications:
items:
type: string
type: array
name:
type: string
ports:
type: string
priority:
type: integer
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- action
- name
- selector
type: object
type: array
process:
items:
properties:
action:
enum:
- allow
- deny
type: string
allow_update:
type: boolean
name:
type: string
path:
type: string
required:
- action
type: object
type: array
process_profile:
properties:
baseline:
enum:
- default
- shield
- basic
- zero-drift
type: string
type: object
target:
properties:
policymode:
enum:
- Discover
- Monitor
- Protect
- N/A
type: string
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- selector
type: object
dlp:
properties:
settings:
items:
properties:
action:
enum:
- allow
- deny
type: string
name:
type: string
required:
- name
- action
type: object
type: array
status:
type: boolean
type: object
waf:
properties:
settings:
items:
properties:
action:
enum:
- allow
- deny
type: string
name:
type: string
required:
- name
- action
type: object
type: array
status:
type: boolean
type: object
required:
- target
type: object
type: object
{{- end }}
---
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apiextensions.k8s.io/v1
{{- else }}
apiVersion: apiextensions.k8s.io/v1beta1
{{- end }}
kind: CustomResourceDefinition
metadata:
name: nvclustersecurityrules.neuvector.com
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
group: neuvector.com
names:
kind: NvClusterSecurityRule
listKind: NvClusterSecurityRuleList
plural: nvclustersecurityrules
singular: nvclustersecurityrule
scope: Cluster
{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
version: v1
{{- end }}
versions:
- name: v1
served: true
storage: true
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
schema:
openAPIV3Schema:
properties:
spec:
properties:
egress:
items:
properties:
action:
enum:
- allow
- deny
type: string
applications:
items:
type: string
type: array
name:
type: string
ports:
type: string
priority:
type: integer
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- action
- name
- selector
type: object
type: array
file:
items:
properties:
app:
items:
type: string
type: array
behavior:
enum:
- monitor_change
- block_access
type: string
filter:
type: string
recursive:
type: boolean
required:
- behavior
- filter
type: object
type: array
ingress:
items:
properties:
action:
enum:
- allow
- deny
type: string
applications:
items:
type: string
type: array
name:
type: string
ports:
type: string
priority:
type: integer
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- action
- name
- selector
type: object
type: array
process:
items:
properties:
action:
enum:
- allow
- deny
type: string
allow_update:
type: boolean
name:
type: string
path:
type: string
required:
- action
type: object
type: array
process_profile:
properties:
baseline:
enum:
- default
- shield
- basic
- zero-drift
type: string
type: object
target:
properties:
policymode:
enum:
- Discover
- Monitor
- Protect
- N/A
type: string
selector:
properties:
comment:
type: string
criteria:
items:
properties:
key:
type: string
op:
type: string
value:
type: string
required:
- key
- op
- value
type: object
type: array
name:
type: string
original_name:
type: string
required:
- name
type: object
required:
- selector
type: object
dlp:
properties:
settings:
items:
properties:
action:
enum:
- allow
- deny
type: string
name:
type: string
required:
- name
- action
type: object
type: array
status:
type: boolean
type: object
waf:
properties:
settings:
items:
properties:
action:
enum:
- allow
- deny
type: string
name:
type: string
required:
- name
- action
type: object
type: array
status:
type: boolean
type: object
required:
- target
type: object
type: object
{{- end }}
---
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apiextensions.k8s.io/v1
{{- else }}
apiVersion: apiextensions.k8s.io/v1beta1
{{- end }}
kind: CustomResourceDefinition
metadata:
name: nvdlpsecurityrules.neuvector.com
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
group: neuvector.com
names:
kind: NvDlpSecurityRule
listKind: NvDlpSecurityRuleList
plural: nvdlpsecurityrules
singular: nvdlpsecurityrule
scope: Cluster
{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
version: v1
{{- end }}
versions:
- name: v1
served: true
storage: true
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
schema:
openAPIV3Schema:
properties:
spec:
properties:
sensor:
properties:
comment:
type: string
name:
type: string
rules:
items:
properties:
name:
type: string
patterns:
items:
properties:
context:
enum:
- url
- header
- body
- packet
type: string
key:
enum:
- pattern
type: string
op:
enum:
- regex
- '!regex'
type: string
value:
type: string
required:
- key
- op
- value
- context
type: object
type: array
required:
- name
- patterns
type: object
type: array
required:
- name
type: object
required:
- sensor
type: object
type: object
{{- end }}
---
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apiextensions.k8s.io/v1
{{- else }}
apiVersion: apiextensions.k8s.io/v1beta1
{{- end }}
kind: CustomResourceDefinition
metadata:
name: nvadmissioncontrolsecurityrules.neuvector.com
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
group: neuvector.com
names:
kind: NvAdmissionControlSecurityRule
listKind: NvAdmissionControlSecurityRuleList
plural: nvadmissioncontrolsecurityrules
singular: nvadmissioncontrolsecurityrule
scope: Cluster
{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
version: v1
{{- end }}
versions:
- name: v1
served: true
storage: true
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
schema:
openAPIV3Schema:
properties:
spec:
properties:
config:
properties:
client_mode:
enum:
- service
- url
type: string
enable:
type: boolean
mode:
enum:
- monitor
- protect
type: string
required:
- enable
- mode
- client_mode
type: object
rules:
items:
properties:
action:
enum:
- allow
- deny
type: string
comment:
type: string
criteria:
items:
properties:
name:
type: string
op:
type: string
path:
type: string
sub_criteria:
items:
properties:
name:
type: string
op:
type: string
value:
type: string
required:
- name
- op
- value
type: object
type: array
template_kind:
type: string
type:
type: string
value:
type: string
value_type:
type: string
required:
- name
- op
- value
type: object
type: array
disabled:
type: boolean
id:
type: integer
rule_mode:
enum:
- ""
- monitor
- protect
type: string
required:
- action
- criteria
type: object
type: array
type: object
type: object
{{- end }}
---
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apiextensions.k8s.io/v1
{{- else }}
apiVersion: apiextensions.k8s.io/v1beta1
{{- end }}
kind: CustomResourceDefinition
metadata:
name: nvwafsecurityrules.neuvector.com
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
group: neuvector.com
names:
kind: NvWafSecurityRule
listKind: NvWafSecurityRuleList
plural: nvwafsecurityrules
singular: nvwafsecurityrule
scope: Cluster
{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
version: v1
{{- end }}
versions:
- name: v1
served: true
storage: true
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
schema:
openAPIV3Schema:
properties:
spec:
properties:
sensor:
properties:
comment:
type: string
name:
type: string
rules:
items:
properties:
name:
type: string
patterns:
items:
properties:
context:
enum:
- url
- header
- body
- packet
type: string
key:
enum:
- pattern
type: string
op:
enum:
- regex
- '!regex'
type: string
value:
type: string
required:
- key
- op
- value
- context
type: object
type: array
required:
- name
- patterns
type: object
type: array
required:
- name
type: object
required:
- sensor
type: object
type: object
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-crd-webhook
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: Helm
spec:
ports:
- port: 443
targetPort: 30443
protocol: TCP
name: crd-webhook
type: {{ .Values.crdwebhook.type }}
selector:
app: neuvector-controller-pod
{{- end }}

@ -0,0 +1,9 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.
openshift: false
crdwebhook:
type: ClusterIP
enabled: true

@ -0,0 +1,26 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: NeuVector Monitor
catalog.cattle.io/kube-version: '>=1.18.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/provides-gvr: neuvector.com/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: neuvector-monitor
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 2.6.6
apiVersion: v1
appVersion: 5.2.4
description: Helm feature chart for NeuVector monitor services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
keywords:
- security
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector-monitor
sources:
- https://github.com/neuvector/neuvector
version: 102.0.6+up2.6.6

@ -0,0 +1,22 @@
# NeuVector Helm Chart
Helm chart for NeuVector's monitoring services.
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`registry` | NeuVector container registry | `registry.neuvector.com` |
`oem` | OEM release name | `nil` |
`leastPrivilege` | The monitor chart is assumed to always be installed after the core chart, so the service accounts created by the core chart will be used. Keep this value the same as in the core chart. | `false` |
`exporter.enabled` | If true, create Prometheus exporter | `false` |
`exporter.image.repository` | exporter image name | `neuvector/prometheus-exporter` |
`exporter.image.tag` | exporter image tag | `latest` |
`exporter.ctrlSercretName` | Existing secret that has CTRL_USERNAME and CTRL_PASSWORD fields for logging in to the controller. | `nil` | If this parameter is set, `exporter.CTRL_USERNAME` & `exporter.CTRL_PASSWORD` will be skipped (see the example below)
`exporter.CTRL_USERNAME` | Username used to log in to the controller. It is suggested to replace the default admin user with a read-only user. | `admin` |
`exporter.CTRL_PASSWORD` | Password used to log in to the controller. | `admin` |
---
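As an illustrative sketch (the secret name is hypothetical), the exporter can be pointed at an existing secret so that controller credentials are kept out of the values file:

```yaml
exporter:
  enabled: true
  # existing secret providing the CTRL_USERNAME and CTRL_PASSWORD keys
  ctrlSercretName: neuvector-controller-credentials
  svc:
    enabled: true
    type: ClusterIP
  serviceMonitor:
    enabled: true  # requires the Prometheus Operator CRDs
```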

@ -0,0 +1,5 @@
### Run-Time Protection Without Compromise
NeuVector delivers a complete run-time security solution with container process/file system protection and vulnerability scanning combined with the only true Layer 7 container firewall. Protect sensitive data with a complete container security platform.
Helm chart for NeuVector's monitoring services. Please make sure the controller's REST API service is enabled in the core chart.
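For example, with the core chart this service is usually enabled through a value along these lines (assuming the core chart's `controller.apisvc.type` parameter):

```yaml
controller:
  apisvc:
    type: ClusterIP
```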

File diff suppressed because it is too large
@ -0,0 +1,27 @@
questions:
#monitor configurations
- variable: exporter.image.repository
default: "neuvector/prometheus-exporter"
description: exporter image repository
type: string
label: Exporter Image Path
group: "Container Images"
- variable: exporter.image.tag
default: ""
description: image tag for exporter
type: string
  label: Exporter Image Tag
group: "Container Images"
#controller credential configuration
- variable: exporter.CTRL_USERNAME
default: "admin"
description: Controller Username
type: string
label: Controller Username
group: "Controller Crendential"
- variable: exporter.CTRL_PASSWORD
default: "admin"
description: Controller Password
type: string
label: Controller Password
group: "Controller Crendential"

@ -0,0 +1,40 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,15 @@
{{- if .Values.exporter.grafanaDashboard.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: nv-grafana-dashboard
namespace: {{ .Values.exporter.grafanaDashboard.namespace | default .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- if .Values.exporter.grafanaDashboard.labels }}
{{- toYaml .Values.exporter.grafanaDashboard.labels | nindent 4}}
{{- end }}
data:
nv_dashboard.json: |
{{ .Files.Get "dashboards/nv_dashboard.json" | indent 4 }}
{{- end }}

View File

@ -0,0 +1,60 @@
{{- if .Values.exporter.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: neuvector-prometheus-exporter-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: 1
selector:
matchLabels:
app: neuvector-prometheus-exporter-pod
template:
metadata:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: "8068"
prometheus.io/scrape: "true"
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
app: neuvector-prometheus-exporter-pod
release: {{ .Release.Name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.leastPrivilege }}
serviceAccountName: basic
serviceAccount: basic
{{- end }}
containers:
- name: neuvector-prometheus-exporter-pod
{{ if eq .Values.registry "registry.neuvector.com" }}
{{ if .Values.oem }}
image: "{{ .Values.registry }}/{{ .Values.oem }}/prometheus-exporter:{{ .Values.exporter.image.tag }}"
{{- else }}
image: "{{ .Values.registry }}/prometheus-exporter:{{ .Values.exporter.image.tag }}"
{{- end }}
{{- else }}
image: {{ template "system_default_registry" . }}{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }}
{{- end }}
imagePullPolicy: Always
env:
- name: CTRL_API_SERVICE
value: {{ .Values.exporter.apiSvc }}
- name: EXPORTER_PORT
value: "8068"
envFrom:
- secretRef:
{{- if .Values.exporter.ctrlSercretName }}
name: {{ .Values.exporter.ctrlSercretName }}
{{ else }}
name: neuvector-prometheus-exporter-pod-secret
{{- end }}
restartPolicy: Always
{{- end }}

@ -0,0 +1,28 @@
{{- if and .Values.exporter.enabled .Values.exporter.svc.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: neuvector-prometheus-exporter
namespace: {{ .Release.Namespace }}
{{- with .Values.exporter.svc.annotations }}
annotations:
{{ toYaml . | nindent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app: neuvector-prometheus-exporter
spec:
type: {{ .Values.exporter.svc.type }}
{{- if and .Values.exporter.svc.loadBalancerIP (eq .Values.exporter.svc.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.exporter.svc.loadBalancerIP }}
{{- end }}
ports:
- port: 8068
name: metrics
targetPort: 8068
protocol: TCP
selector:
app: neuvector-prometheus-exporter-pod
{{- end }}

@ -0,0 +1,39 @@
{{- if .Values.exporter.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: neuvector-prometheus-exporter
namespace: {{ .Release.Namespace }}
{{- with .Values.exporter.serviceMonitor.annotations }}
annotations:
{{ toYaml . | nindent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.exporter.serviceMonitor.labels }}
{{- toYaml .Values.exporter.serviceMonitor.labels | nindent 4}}
{{- end }}
spec:
selector:
matchLabels:
app: neuvector-prometheus-exporter
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: metrics
{{- if .Values.exporter.serviceMonitor.interval }}
interval: {{ .Values.exporter.serviceMonitor.interval }}
{{- end }}
path: "/metrics"
{{- if .Values.exporter.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- toYaml .Values.exporter.serviceMonitor.metricRelabelings | nindent 6 }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.relabelings }}
relabelings:
{{- toYaml .Values.exporter.serviceMonitor.relabelings | nindent 6 }}
{{- end }}
{{- end }}

@ -0,0 +1,15 @@
{{- if and (.Values.exporter.enabled) (not .Values.exporter.ctrlSercretName) -}}
apiVersion: v1
kind: Secret
metadata:
name: neuvector-prometheus-exporter-pod-secret
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
type: Opaque
data:
CTRL_USERNAME: {{ .Values.exporter.CTRL_USERNAME | b64enc | quote }}
CTRL_PASSWORD: {{ .Values.exporter.CTRL_PASSWORD | b64enc | quote }}
{{- end }}

@ -0,0 +1,52 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.
global:
cattle:
systemDefaultRegistry: ""
registry: docker.io
oem: ''
leastPrivilege: false
exporter:
# If false, exporter will not be installed
enabled: true
image:
repository: rancher/mirrored-neuvector-prometheus-exporter
tag: 5.2.4
  # change this to a read-only user!
CTRL_USERNAME: admin
CTRL_PASSWORD: admin
ctrlSercretName: ''
apiSvc: neuvector-svc-controller-api:10443
svc:
enabled: true
type: ClusterIP
loadBalancerIP: ''
annotations: {}
# service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "apps-subnet"
grafanaDashboard:
enabled: false
namespace: "" # Release namespace, if empty
labels: {}
serviceMonitor:
enabled: false
# labels for the ServiceMonitor.
labels: {}
# annotations for the ServiceMonitor.
annotations: {}
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
metricRelabelings: []
# RelabelConfigs to apply to samples before scraping
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
relabelings: []

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

Some files were not shown because too many files have changed in this diff