[dev-v2.9] Add Longhorn 1.6.2 chart (#4207)

Signed-off-by: James Lu <james.lu@suse.com>
pull/4222/head
James Lu 2024-07-16 02:45:58 +08:00 committed by GitHub
parent b60e05b015
commit e22b1ceebc
91 changed files with 16111 additions and 0 deletions

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/release-name: longhorn-crd
apiVersion: v1
appVersion: v1.6.2
description: Installs the CRDs for longhorn.
name: longhorn-crd
type: application
version: 104.1.0+up1.6.2

View File

@ -0,0 +1,2 @@
# longhorn-crd
A Rancher chart that installs the CRDs used by longhorn.

View File

@ -0,0 +1,66 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "longhorn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "longhorn.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "longhorn.managerIP" -}}
{{- $fullname := (include "longhorn.fullname" .) -}}
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "secret" }}
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
{{- end }}
{{- /*
longhorn.labels generates the standard Helm labels.
*/ -}}
{{- define "longhorn.labels" -}}
app.kubernetes.io/name: {{ template "longhorn.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.privateRegistry.registryUrl -}}
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
{{- else -}}
{{ include "system_default_registry" . }}
{{- end -}}
{{- end -}}
{{- /*
define the longhorn release namespace
*/ -}}
{{- define "release_namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
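For orientation, here is a minimal, hypothetical sketch of how helpers like these are typically consumed in a chart template; the `-example` name suffix and `example-image` reference are illustrative assumptions, not resources defined by this chart:

```yaml
# Hypothetical template illustrating how the helpers above compose (not part of this chart).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "longhorn.fullname" . }}-example            # assumed name suffix
  namespace: {{ include "release_namespace" . }}               # namespaceOverride, else .Release.Namespace
  labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "longhorn.name" . }}
  template:
    metadata:
      labels: {{- include "longhorn.labels" . | nindent 8 }}
    spec:
      containers:
        - name: example
          # "registry_url" yields privateRegistry.registryUrl or the system default registry, with a trailing slash
          image: "{{ template "registry_url" . }}example-image:v0.0.0"
```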

File diff suppressed because it is too large

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,40 @@
annotations:
catalog.cattle.io/auto-install: longhorn-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Longhorn
catalog.cattle.io/kube-version: '>= 1.21.0-0'
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: longhorn
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.6.2
apiVersion: v1
appVersion: v1.6.2
description: Longhorn is a distributed block storage system for Kubernetes.
home: https://github.com/longhorn/longhorn
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
keywords:
- longhorn
- storage
- distributed
- block
- device
- iscsi
- nfs
kubeVersion: '>=1.21.0-0'
maintainers:
- email: maintainers@longhorn.io
name: Longhorn maintainers
name: longhorn
sources:
- https://github.com/longhorn/longhorn
- https://github.com/longhorn/longhorn-engine
- https://github.com/longhorn/longhorn-instance-manager
- https://github.com/longhorn/longhorn-share-manager
- https://github.com/longhorn/longhorn-manager
- https://github.com/longhorn/longhorn-ui
- https://github.com/longhorn/longhorn-tests
- https://github.com/longhorn/backing-image-manager
version: 104.1.0+up1.6.2

View File

@ -0,0 +1,50 @@
# Longhorn Chart
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/).
## Source Code
Longhorn is 100% open source software. Project source code is spread across a number of repos:
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
## Prerequisites
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes >= v1.21
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image because it already includes `open-iscsi`.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
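To make the Pod Security Admission guidance above concrete, the following is a hedged sketch of namespace labels that apply a Pod Security Standard to the release namespace; the `privileged` level is an assumption here, so consult the Longhorn docs for the levels they recommend:

```yaml
# Sketch only: Pod Security Admission labels on the longhorn-system namespace.
# The "privileged" level is an assumption; check the Longhorn docs for recommended values.
apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/warn: privileged
```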
## Uninstallation
To prevent Longhorn from being accidentally uninstalled (which leads to data loss), we introduce a new setting, deleting-confirmation-flag. If this flag is **false**, the Longhorn uninstallation job will fail. Set this flag to **true** to allow Longhorn uninstallation. You can set this flag on the settings page in the Longhorn UI or with `kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag`.
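The `lhs` resource in the command above is the Longhorn `Setting` custom resource; a hedged sketch of the equivalent object after patching is shown below (the `longhorn.io/v1beta2` API version is an assumption for Longhorn v1.6):

```yaml
# Sketch of the Setting object the patch above modifies; apiVersion is assumed.
apiVersion: longhorn.io/v1beta2
kind: Setting
metadata:
  name: deleting-confirmation-flag
  namespace: longhorn-system
value: "true"
```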
To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads that use Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc.).
From the Rancher Cluster Explorer UI, navigate to the Apps page and, in the Installed Apps tab, delete the `longhorn` app followed by the `longhorn-crd` app.
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

View File

@ -0,0 +1,27 @@
# Longhorn
Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
**Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.

View File

@ -0,0 +1,920 @@
categories:
- storage
namespace: longhorn-system
questions:
- variable: image.defaultImage
default: "true"
description: "Use default Longhorn images"
label: Use Default Images
type: boolean
show_subquestion_if: false
group: "Longhorn Images"
subquestions:
- variable: image.longhorn.manager.repository
default: rancher/mirrored-longhornio-longhorn-manager
description: "Repository for the Longhorn Manager image."
type: string
label: Longhorn Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.manager.tag
default: v1.6.2
description: "Specify Longhorn Manager Image Tag"
type: string
label: Longhorn Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.repository
default: rancher/mirrored-longhornio-longhorn-engine
description: "Repository for the Longhorn Engine image."
type: string
label: Longhorn Engine Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.tag
default: v1.6.2
description: "Specify Longhorn Engine Image Tag"
type: string
label: Longhorn Engine Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.repository
default: rancher/mirrored-longhornio-longhorn-ui
description: "Repository for the Longhorn UI image."
type: string
label: Longhorn UI Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.tag
default: v1.6.2
description: "Specify Longhorn UI Image Tag"
type: string
label: Longhorn UI Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.repository
default: rancher/mirrored-longhornio-longhorn-instance-manager
description: "Repository for the Longhorn Instance Manager image."
type: string
label: Longhorn Instance Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.tag
default: v1.6.2
description: "Specify Longhorn Instance Manager Image Tag"
type: string
label: Longhorn Instance Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.repository
default: rancher/mirrored-longhornio-longhorn-share-manager
description: "Repository for the Longhorn Share Manager image."
type: string
label: Longhorn Share Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.tag
default: v1.6.2
description: "Specify Longhorn Share Manager Image Tag"
type: string
label: Longhorn Share Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.repository
default: rancher/mirrored-longhornio-backing-image-manager
description: "Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn Backing Image Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.tag
default: v1.6.2
description: "Specify Longhorn Backing Image Manager Image Tag"
type: string
label: Longhorn Backing Image Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.repository
default: rancher/mirrored-longhornio-support-bundle-kit
description: "Repository for the Longhorn Support Bundle Manager image."
type: string
label: Longhorn Support Bundle Kit Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.tag
default: v0.0.37
description: "Tag for the Longhorn Support Bundle Manager image."
type: string
label: Longhorn Support Bundle Kit Image Tag
group: "Longhorn Images Settings"
- variable: image.csi.attacher.repository
default: rancher/mirrored-longhornio-csi-attacher
description: "Repository for the CSI attacher image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Attacher Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.attacher.tag
default: v4.5.1
description: "Tag for the CSI attacher image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Attacher Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.repository
default: rancher/mirrored-longhornio-csi-provisioner
description: "Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Provisioner Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.tag
default: v3.6.4
description: "Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Provisioner Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.repository
default: rancher/mirrored-longhornio-csi-node-driver-registrar
description: "Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Node Driver Registrar Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.tag
default: v2.9.2
description: "Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Node Driver Registrar Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.repository
default: rancher/mirrored-longhornio-csi-resizer
description: "Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Resizer Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.tag
default: v1.10.1
description: "Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Resizer Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.repository
default: rancher/mirrored-longhornio-csi-snapshotter
description: "Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Snapshotter Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.tag
default: v6.3.4
description: "Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Snapshotter Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.repository
default: rancher/mirrored-longhornio-livenessprobe
description: "Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Liveness Probe Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.tag
default: v2.12.0
description: "Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Liveness Probe Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.openshift.oauthProxy.repository
default: rancher/mirrored-longhornio-openshift-origin-oauth-proxy
description: "Repository for the OAuth Proxy image. This setting applies only to OpenShift users"
type: string
label: OpenShift OAuth Proxy Image Repository
group: "OpenShift Images"
- variable: image.openshift.oauthProxy.tag
default: 4.14
description: "Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later."
type: string
label: OpenShift OAuth Proxy Image Tag
group: "OpenShift Images"
- variable: privateRegistry.registryUrl
label: Private registry URL
description: "URL of a private registry. When unspecified, Longhorn uses the default system registry."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.registrySecret
label: Private registry secret name
description: "Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.createSecret
default: "true"
description: "Setting that allows you to create a private registry secret."
type: boolean
group: "Private Registry Settings"
label: Create Secret for Private Registry Settings
show_subquestion_if: true
subquestions:
- variable: privateRegistry.registryUser
label: Private registry user
description: "User account used for authenticating with a private registry."
type: string
default: ""
- variable: privateRegistry.registryPasswd
label: Private registry password
description: "Password for authenticating with a private registry."
type: password
default: ""
- variable: longhorn.default_setting
default: "false"
description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
label: "Customize Default Settings"
type: boolean
show_subquestion_if: true
group: "Longhorn Default Settings"
subquestions:
- variable: csi.kubeletRootDir
default:
description: "kubelet root directory. When unspecified, Longhorn uses the default value."
type: string
label: Kubelet Root Directory
group: "Longhorn CSI Driver Settings"
- variable: csi.attacherReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Attacher replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.provisionerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Provisioner replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.resizerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Resizer replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.snapshotterReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Snapshotter replica count
group: "Longhorn CSI Driver Settings"
- variable: defaultSettings.backupTarget
label: Backup Target
description: "Endpoint used to access the backupstore. (Options: \"NFS\", \"CIFS\", \"AWS\", \"GCP\", \"AZURE\")"
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.backupTargetCredentialSecret
label: Backup Target Credential Secret
description: "Name of the Kubernetes secret associated with the backup target."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
label: Allow Recurring Job While Volume Is Detached
description: 'Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.snapshotMaxCount
label: Snapshot Maximum Count
description: 'Maximum snapshot count for a volume. The value should be between 2 to 250.'
group: "Longhorn Default Settings"
type: int
min: 2
max: 250
default: 250
- variable: defaultSettings.createDefaultDiskLabeledNodes
label: Create Default Disk on Labeled Nodes
description: 'Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.defaultDataPath
label: Default Data Path
description: 'Default path for storing data on a host. The default value is "/var/lib/longhorn/".'
group: "Longhorn Default Settings"
type: string
default: "/var/lib/longhorn/"
- variable: defaultSettings.defaultDataLocality
label: Default Data Locality
description: 'Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: defaultSettings.replicaSoftAntiAffinity
label: Replica Node Level Soft Anti-Affinity
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default, false.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.replicaAutoBalance
label: Replica Auto Balance
description: 'Setting that automatically rebalances replicas when an available node is discovered.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "least-effort"
- "best-effort"
default: "disabled"
- variable: defaultSettings.storageOverProvisioningPercentage
label: Storage Over Provisioning Percentage
description: "Percentage of storage that can be allocated relative to hard drive capacity. The default value is 100."
group: "Longhorn Default Settings"
type: int
min: 0
default: 100
- variable: defaultSettings.storageMinimalAvailablePercentage
label: Storage Minimal Available Percentage
description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default, 25."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 25
- variable: defaultSettings.storageReservedPercentageForDefaultDisk
label: Storage Reserved Percentage For Default Disk
description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 30
- variable: defaultSettings.upgradeChecker
label: Enable Upgrade Checker
description: 'Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.defaultReplicaCount
label: Default Replica Count
description: "Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is \"3\"."
group: "Longhorn Default Settings"
type: int
min: 1
max: 20
default: 3
- variable: defaultSettings.defaultLonghornStaticStorageClass
label: Default Longhorn Static StorageClass Name
description: "Default Longhorn StorageClass. \"storageClassName\" is assigned to PVs and PVCs that are created for an existing Longhorn volume. \"storageClassName\" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is \"longhorn-static\"."
group: "Longhorn Default Settings"
type: string
default: "longhorn-static"
- variable: defaultSettings.backupstorePollInterval
label: Backupstore Poll Interval
description: "Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is \"300\". When the value is \"0\", polling is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.failedBackupTTL
label: Failed Backup Time to Live
description: "Number of minutes that Longhorn keeps a failed backup resource. When the value is \"0\", automatic deletion is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1440
- variable: defaultSettings.restoreVolumeRecurringJobs
label: Restore Volume Recurring Jobs
description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.recurringSuccessfulJobsHistoryLimit
label: Cronjob Successful Jobs History Limit
description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.recurringFailedJobsHistoryLimit
label: Cronjob Failed Jobs History Limit
description: 'Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.'
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.recurringJobMaxRetention
label: Maximum Retention Number for Recurring Job
description: "Maximum number of snapshots or backups to be retained."
group: "Longhorn Default Settings"
type: int
default: 100
- variable: defaultSettings.supportBundleFailedHistoryLimit
label: SupportBundle Failed History Limit
description: "This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.autoSalvage
label: Automatic salvage
description: "Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
description: 'Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.disableSchedulingOnCordonedNode
label: Disable Scheduling On Cordoned Node
description: "Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.replicaZoneSoftAntiAffinity
label: Replica Zone Level Soft Anti-Affinity
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By, default true."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.replicaDiskSoftAntiAffinity
label: Replica Disk Level Soft Anti-Affinity
description: 'Allow scheduling on disks with existing healthy replicas of the same volume. By default, true.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.allowEmptyNodeSelectorVolume
label: Allow Empty Node Selector Volume
description: "Setting that allows scheduling of empty node selector volumes to any node."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.allowEmptyDiskSelectorVolume
label: Allow Empty Disk Selector Volume
description: "Setting that allows scheduling of empty disk selector volumes to any disk."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.nodeDownPodDeletionPolicy
label: Pod Deletion Policy When Node is Down
description: "Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed."
group: "Longhorn Default Settings"
type: enum
options:
- "do-nothing"
- "delete-statefulset-pod"
- "delete-deployment-pod"
- "delete-both-statefulset-and-deployment-pod"
default: "do-nothing"
- variable: defaultSettings.nodeDrainPolicy
label: Node Drain Policy
description: "Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained."
group: "Longhorn Default Settings"
type: enum
options:
- "block-for-eviction"
- "block-for-eviction-if-contains-last-replica"
- "block-if-contains-last-replica"
- "allow-if-replica-is-stopped"
- "always-allow"
default: "block-if-contains-last-replica"
- variable: defaultSettings.detachManuallyAttachedVolumesWhenCordoned
label: Detach Manually Attached Volumes When Cordoned
description: "Setting that allows automatic detaching of manually-attached volumes when a node is cordoned."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.priorityClass
label: Priority Class
description: "PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Longhorn system contains user deployed components (E.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (E.g, instance manager, engine image, CSI driver, etc.) Note that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
group: "Longhorn Default Settings"
type: string
default: "longhorn-critical"
- variable: defaultSettings.replicaReplenishmentWaitInterval
label: Replica Replenishment Wait Interval
description: "The interval in seconds determines how long Longhorn will at least wait to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume."
group: "Longhorn Default Settings"
type: int
min: 0
default: 600
- variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
label: Concurrent Replica Rebuild Per Node Limit
description: "Maximum number of replicas that can be concurrently rebuilt on each node.
WARNING:
- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
- Unlike relying on the replica starting delay to limit concurrent rebuilding, when rebuilding is disabled, replica object replenishment is skipped directly.
- When the value is 0, the eviction and data locality features won't work, but this shouldn't impact any replica rebuild or backup restore that is already in progress."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
label: Concurrent Volume Backup Restore Per Node Limit
description: "Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is \"0\", restoration of volumes using a backup is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.disableRevisionCounter
label: Disable Revision Counter
description: "Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the \"volume-head-xxx.img\" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.systemManagedPodsImagePullPolicy
label: System Managed Pod Image Pull Policy
description: "Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart."
group: "Longhorn Default Settings"
type: enum
options:
- "if-not-present"
- "always"
- "never"
default: "if-not-present"
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
label: Allow Volume Creation with Degraded Availability
description: "Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
label: Automatically Cleanup System Generated Snapshot
description: "Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoCleanupRecurringJobBackupSnapshot
label: Automatically Cleanup Recurring Job Backup Snapshot
description: "Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
label: Concurrent Automatic Engine Upgrade Per Node Limit
description: "Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is \"0\", Longhorn does not automatically upgrade volume engines to the new default engine image version."
group: "Longhorn Default Settings"
type: int
min: 0
default: 0
- variable: defaultSettings.backingImageCleanupWaitInterval
label: Backing Image Cleanup Wait Interval
description: "Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it."
group: "Longhorn Default Settings"
type: int
min: 0
default: 60
- variable: defaultSettings.backingImageRecoveryWaitInterval
label: Backing Image Recovery Wait Interval
description: "Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to \"failed\" or \"unknown\"."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.guaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU
description: "Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is \"12\".
WARNING:
- A value of 0 removes the CPU requests from the spec of the instance manager pods.
- To account for the additional instance manager pods that a future system upgrade may create, this integer value ranges from 0 to 40.
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If the nodes do not have enough available CPU for the new instance manager pods, detach the volumes that use the oldest instance manager pods so that Longhorn can automatically clean up the old pods, release their CPU resources, and launch the new pods with the latest instance manager image.
- This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
- After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
group: "Longhorn Default Settings"
type: int
min: 0
max: 40
default: 12
- variable: defaultSettings.logLevel
label: Log Level
description: 'Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")'
group: "Longhorn Default Settings"
type: string
default: "Info"
- variable: defaultSettings.disableSnapshotPurge
label: Disable Snapshot Purge
description: "Setting that temporarily prevents all attempts to purge volume snapshots."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
label: Kubernetes Cluster Autoscaler Enabled (Experimental)
description: "Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
WARNING:
- Replica rebuilding could be expensive because nodes with reusable replicas could get removed by the Kubernetes Cluster Autoscaler."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.orphanAutoDeletion
label: Orphaned Data Cleanup
description: "Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.storageNetwork
label: Storage Network
description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
WARNING:
- Change this setting only after detaching all Longhorn volumes, as some Longhorn system component pods will be recreated to apply the setting. Longhorn will try to block this setting update when there are attached volumes."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.deletingConfirmationFlag
label: Deleting Confirmation Flag
description: "Flag that prevents accidental uninstallation of Longhorn."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.engineReplicaTimeout
label: Timeout between Engine and Replica
description: "Timeout between the Longhorn Engine and replicas. Specify a value between \"8\" and \"30\" seconds. The default value is \"8\"."
group: "Longhorn Default Settings"
type: int
default: "8"
- variable: defaultSettings.snapshotDataIntegrity
label: Snapshot Data Integrity
description: "This setting allows users to enable or disable snapshot hashing and data integrity checking."
group: "Longhorn Default Settings"
type: string
default: "disabled"
- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation
label: Immediate Snapshot Data Integrity Check After Creating a Snapshot
description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.snapshotDataIntegrityCronjob
label: Snapshot Data Integrity Check CronJob
description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files."
group: "Longhorn Default Settings"
type: string
default: "0 0 */7 * *"
- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
label: Remove Snapshots During Filesystem Trim
description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.fastReplicaRebuildEnabled
label: Fast Replica Rebuild Enabled
description: "Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to \"enable\" or \"fast-check\"."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.replicaFileSyncHttpClientTimeout
label: Timeout of HTTP Client to Replica File Sync Server
description: "In seconds. The setting specifies the HTTP client timeout to the file sync server."
group: "Longhorn Default Settings"
type: int
default: "30"
- variable: defaultSettings.backupCompressionMethod
label: Backup Compression Method
description: "Setting that allows you to specify a backup compression method."
group: "Longhorn Default Settings"
type: string
default: "lz4"
- variable: defaultSettings.backupConcurrentLimit
label: Backup Concurrent Limit Per Backup
description: "Maximum number of worker threads that can concurrently run for each backup."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.restoreConcurrentLimit
label: Restore Concurrent Limit Per Backup
description: "This setting controls how many worker threads per restore concurrently."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.allowCollectingLonghornUsageMetrics
label: Allow Collecting Longhorn Usage Metrics
description: "Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses."
group: "Longhorn Default Settings"
type: boolean
default: true
- variable: defaultSettings.v1DataEngine
label: V1 Data Engine
description: "Setting that allows you to enable the V1 Data Engine."
group: "Longhorn V1 Data Engine Settings"
type: boolean
default: true
- variable: defaultSettings.v2DataEngine
label: V2 Data Engine
description: "Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments.
WARNING:
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
- When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
type: boolean
default: false
- variable: defaultSettings.v2DataEngineHugepageLimit
label: V2 Data Engine Hugepage Limit
description: "This allows users to configure maximum huge page size (in MiB) for the V2 Data Engine."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
type: int
default: "2048"
- variable: defaultSettings.offlineReplicaRebuilding
label: Offline Replica Rebuilding
description: "Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
required: true
type: enum
options:
- "enabled"
- "disabled"
default: "enabled"
- variable: persistence.defaultClass
default: "true"
description: "Setting that allows you to specify the default Longhorn StorageClass."
label: Default Storage Class
group: "Longhorn Storage Class Settings"
required: true
type: boolean
- variable: persistence.reclaimPolicy
label: Storage Class Retain Policy
description: "Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: \"Retain\", \"Delete\")"
group: "Longhorn Storage Class Settings"
required: true
type: enum
options:
- "Delete"
- "Retain"
default: "Delete"
- variable: persistence.defaultClassReplicaCount
description: "Replica count of the default Longhorn StorageClass."
label: Default Storage Class Replica Count
group: "Longhorn Storage Class Settings"
type: int
min: 1
max: 10
default: 3
- variable: persistence.defaultDataLocality
description: "Data locality of the default Longhorn StorageClass. (Options: \"disabled\", \"best-effort\")"
label: Default Storage Class Data Locality
group: "Longhorn Storage Class Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: persistence.recurringJobSelector.enable
description: "Setting that allows you to enable the recurring job selector for a Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Recurring Job Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.recurringJobSelector.jobList
description: 'Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)'
label: Storage Class Recurring Job Selector List
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.defaultNodeSelector.enable
description: "Setting that allows you to enable the node selector for the default Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Node Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.defaultNodeSelector.selector
label: Storage Class Node Selector
description: 'Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")'
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.enable
description: "Setting that allows you to use a backing image in a Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Default Storage Class Backing Image
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.backingImage.name
description: 'Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.'
label: Storage Class Backing Image Name
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.expectedChecksum
description: 'Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- It is not recommended to set this field if the data source type is \"export-from-volume\".'
label: Storage Class Backing Image Expected SHA512 Checksum
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.dataSourceType
description: 'Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.'
label: Storage Class Backing Image Data Source Type
group: "Longhorn Storage Class Settings"
type: enum
options:
- ""
- "download"
- "upload"
- "export-from-volume"
default: ""
- variable: persistence.backingImage.dataSourceParameters
description: "Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- Be careful of the quotes here."
label: Storage Class Backing Image Data Source Parameters
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.removeSnapshotsDuringFilesystemTrim
description: "Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: \"ignored\", \"enabled\", \"disabled\")"
label: Default Storage Class Remove Snapshots During Filesystem Trim
group: "Longhorn Storage Class Settings"
type: enum
options:
- "ignored"
- "enabled"
- "disabled"
default: "ignored"
- variable: ingress.enabled
default: "false"
description: "Expose app using Layer 7 Load Balancer - ingress"
type: boolean
group: "Services and Load Balancing"
label: Expose app using Layer 7 Load Balancer
show_subquestion_if: true
subquestions:
- variable: ingress.host
default: "xip.io"
description: "Hostname of the Layer 7 load balancer."
type: hostname
required: true
label: Layer 7 Load Balancer Hostname
- variable: ingress.path
default: "/"
description: "Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}."
type: string
required: true
label: Ingress Path
- variable: service.ui.type
default: "Rancher-Proxy"
description: "Service type for Longhorn UI. (Options: \"ClusterIP\", \"NodePort\", \"LoadBalancer\", \"Rancher-Proxy\")"
type: enum
options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"
- "Rancher-Proxy"
label: Longhorn UI Service
show_if: "ingress.enabled=false"
group: "Services and Load Balancing"
show_subquestion_if: "NodePort"
subquestions:
- variable: service.ui.nodePort
default: ""
description: "NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767."
type: int
min: 30000
max: 32767
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
label: UI Service NodePort number
- variable: enablePSP
default: "false"
description: "Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled."
label: Pod Security Policy
type: boolean
group: "Other Settings"
- variable: global.cattle.windowsCluster.enabled
default: "false"
description: "Setting that allows Longhorn to run on a Rancher Windows cluster."
label: Rancher Windows Cluster
type: boolean
group: "Other Settings"
- variable: networkPolicies.enabled
description: "Setting that allows you to enable network policies that control access to Longhorn pods.
Warning: If this feature is enabled, the Rancher Proxy will not work unless a custom NetworkPolicy is added."
group: "Other Settings"
label: Network Policies
default: "false"
type: boolean
subquestions:
- variable: networkPolicies.type
label: Network Policies for Ingress
description: "Distribution that determines the policy for allowing access for an ingress. (Options: \"k3s\", \"rke2\", \"rke1\")"
show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
type: enum
default: "rke2"
options:
- "rke1"
- "rke2"
- "k3s"
- variable: defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU for V2 Data Engine
description: 'Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
WARNING:
- Specifying a value of 0 disables CPU requests for instance manager pods. Otherwise, specify an integer between 1000 and 8000.
- This is a global setting. Modifying the value triggers an automatic restart of the instance manager pods. Do not modify the value while volumes are still attached.'
group: "Longhorn Default Settings"
type: int
min: 1000
max: 8000
default: 1250
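For reference, a hedged, non-exhaustive sketch of how some of the question variables above map onto the chart's values.yaml keys; defaults are taken from this file, and the fragment is illustrative rather than a complete values file:

```yaml
# Illustrative values.yaml fragment assembled from the question variable paths above (not exhaustive).
image:
  defaultImage: true
  longhorn:
    manager:
      repository: rancher/mirrored-longhornio-longhorn-manager
      tag: v1.6.2
privateRegistry:
  createSecret: true
  registryUrl: ""
  registryUser: ""
  registryPasswd: ""
  registrySecret: ""
defaultSettings:
  deletingConfirmationFlag: false
  defaultReplicaCount: 3
persistence:
  defaultClass: true
  defaultClassReplicaCount: 3
  reclaimPolicy: Delete
ingress:
  enabled: false
service:
  ui:
    type: Rancher-Proxy
enablePSP: false
```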

View File

@ -0,0 +1,5 @@
Longhorn is now installed on the cluster!
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
Visit our documentation at https://longhorn.io/docs/

View File

@ -0,0 +1,66 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "longhorn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "longhorn.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "longhorn.managerIP" -}}
{{- $fullname := (include "longhorn.fullname" .) -}}
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "secret" }}
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
{{- end }}
{{- /*
longhorn.labels generates the standard Helm labels.
*/ -}}
{{- define "longhorn.labels" -}}
app.kubernetes.io/name: {{ template "longhorn.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.privateRegistry.registryUrl -}}
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
{{- else -}}
{{ include "system_default_registry" . }}
{{- end -}}
{{- end -}}
{{- /*
define the longhorn release namespace
*/ -}}
{{- define "release_namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
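As an illustration of where the `secret` helper above is consumed, here is a hedged sketch of a dockerconfigjson pull secret built from the privateRegistry values; the actual template in the chart may guard and name it differently:

```yaml
# Sketch only: an image pull secret rendered from the "secret" helper (details may differ in the real chart).
{{- if .Values.privateRegistry.createSecret }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.privateRegistry.registrySecret }}
  namespace: {{ include "release_namespace" . }}
  labels: {{- include "longhorn.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: {{ template "secret" . }}
{{- end }}
```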

View File

@ -0,0 +1,77 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: longhorn-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- "*"
- apiGroups: [""]
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"]
verbs: ["*"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: ["apps"]
resources: ["daemonsets", "statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets", "podsecuritypolicies"]
verbs: ["*"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
verbs: ["*"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
verbs: ["*"]
- apiGroups: ["longhorn.io"]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
{{- if .Values.openshift.enabled }}
"engineimages/finalizers", "nodes/finalizers", "instancemanagers/finalizers",
{{- end }}
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status", "backupbackingimages", "backupbackingimages/status"]
verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["*"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
verbs: ["get", "list", "create", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"]
verbs: ["*"]
{{- if .Values.openshift.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: longhorn-ocp-privileged-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
rules:
- apiGroups: ["security.openshift.io"]
resources: ["securitycontextconstraints"]
resourceNames: ["anyuid", "privileged"]
verbs: ["use"]
{{- end }}

View File

@ -0,0 +1,49 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-bind
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: longhorn-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-support-bundle
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}
{{- if .Values.openshift.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-ocp-privileged-bind
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: longhorn-ocp-privileged-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: longhorn-ui-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: default # supportbundle-agent-support-bundle uses default sa
namespace: {{ include "release_namespace" . }}
{{- end }}

View File

@ -0,0 +1,167 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-manager
namespace: {{ include "release_namespace" . }}
spec:
selector:
matchLabels:
app: longhorn-manager
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-manager
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
containers:
- name: longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true
command:
- longhorn-manager
- -d
{{- if eq .Values.longhornManager.log.format "json" }}
- -j
{{- end }}
- daemon
- --engine-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
- --instance-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
- --share-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
- --backing-image-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
- --support-bundle-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}"
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --service-account
- longhorn-service-account
{{- if .Values.preUpgradeChecker.upgradeVersionCheck }}
- --upgrade-version-check
{{- end }}
ports:
- containerPort: 9500
name: manager
- containerPort: 9501
name: conversion-wh
- containerPort: 9502
name: admission-wh
- containerPort: 9503
name: recov-backend
readinessProbe:
httpGet:
path: /v1/healthz
port: 9501
scheme: HTTPS
volumeMounts:
- name: dev
mountPath: /host/dev/
- name: proc
mountPath: /host/proc/
- name: longhorn
mountPath: /var/lib/longhorn/
mountPropagation: Bidirectional
- name: longhorn-grpc-tls
mountPath: /tls-files/
{{- if .Values.enableGoCoverDir }}
- name: go-cover-dir
mountPath: /go-cover-dir/
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.enableGoCoverDir }}
- name: GOCOVERDIR
value: /go-cover-dir/
{{- end }}
volumes:
- name: dev
hostPath:
path: /dev/
- name: proc
hostPath:
path: /proc/
- name: longhorn
hostPath:
path: /var/lib/longhorn/
{{- if .Values.enableGoCoverDir }}
- name: go-cover-dir
hostPath:
path: /go-cover-dir/
type: DirectoryOrCreate
{{- end }}
- name: longhorn-grpc-tls
secret:
secretName: longhorn-grpc-tls
optional: true
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-backend
namespace: {{ include "release_namespace" . }}
{{- if .Values.longhornManager.serviceAnnotations }}
annotations:
{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.manager.type }}
selector:
app: longhorn-manager
ports:
- name: manager
port: 9500
targetPort: manager
{{- if .Values.service.manager.nodePort }}
nodePort: {{ .Values.service.manager.nodePort }}
{{- end }}
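A hedged sketch of the values this DaemonSet and its backend Service read (everything below is an example, not a shipped default):

  longhornManager:
    log:
      format: json
    priorityClass: "longhorn-critical"
    tolerations: []
    nodeSelector: {}
    serviceAnnotations: {}
  service:
    manager:
      type: ClusterIP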

View File

@ -0,0 +1,229 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
default-setting.yaml: |-
{{- if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }}
backup-target: {{ .Values.defaultSettings.backupTarget }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }}
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }}
allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }}
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }}
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }}
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }}
replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }}
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }}
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }}
storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }}
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }}
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}
default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}
failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }}
restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }}
recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringJobMaxRetention) }}
recurring-job-max-retention: {{ .Values.defaultSettings.recurringJobMaxRetention }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }}
recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }}
support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
{{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
{{- end -}}
{{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
{{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}
priority-class: {{ .Values.defaultSettings.priorityClass }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}
auto-salvage: {{ .Values.defaultSettings.autoSalvage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}
auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }}
disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}
replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaDiskSoftAntiAffinity) }}
replica-disk-soft-anti-affinity: {{ .Values.defaultSettings.replicaDiskSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}
node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }}
node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned) }}
detach-manually-attached-volumes-when-cordoned: {{ .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}
replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }}
concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }}
concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }}
disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }}
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }}
allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }}
auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot) }}
auto-cleanup-recurring-job-backup-snapshot: {{ .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }}
concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }}
backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }}
backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.guaranteedInstanceManagerCPU) }}
guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }}
kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }}
orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }}
storage-network: {{ .Values.defaultSettings.storageNetwork }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }}
deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }}
engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }}
snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }}
snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }}
snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }}
remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }}
fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }}
replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.logLevel) }}
log-level: {{ .Values.defaultSettings.logLevel }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }}
backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }}
backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }}
restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v1DataEngine) }}
v1-data-engine: {{ .Values.defaultSettings.v1DataEngine }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }}
v2-data-engine: {{ .Values.defaultSettings.v2DataEngine }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineHugepageLimit) }}
v2-data-engine-hugepage-limit: {{ .Values.defaultSettings.v2DataEngineHugepageLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.offlineReplicaRebuilding) }}
offline-replica-rebuilding: {{ .Values.defaultSettings.offlineReplicaRebuilding }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyNodeSelectorVolume) }}
allow-empty-node-selector-volume: {{ .Values.defaultSettings.allowEmptyNodeSelectorVolume }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyDiskSelectorVolume) }}
allow-empty-disk-selector-volume: {{ .Values.defaultSettings.allowEmptyDiskSelectorVolume }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowCollectingLonghornUsageMetrics) }}
allow-collecting-longhorn-usage-metrics: {{ .Values.defaultSettings.allowCollectingLonghornUsageMetrics }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableSnapshotPurge) }}
disable-snapshot-purge: {{ .Values.defaultSettings.disableSnapshotPurge }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU) }}
v2-data-engine-guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotMaxCount) }}
snapshot-max-count: {{ .Values.defaultSettings.snapshotMaxCount }}
{{- end }}
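To illustrate how values map onto this ConfigMap (the bucket and secret names below are placeholders): a fragment such as

  defaultSettings:
    backupTarget: "s3://backup-bucket@us-east-1/"
    backupTargetCredentialSecret: "aws-secret"
    defaultReplicaCount: 2
    logLevel: "Info"

would render into default-setting.yaml roughly as

  backup-target: s3://backup-bucket@us-east-1/
  backup-target-credential-secret: aws-secret
  default-replica-count: 2
  log-level: Info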

View File

@ -0,0 +1,132 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: longhorn-driver-deployer
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
app: longhorn-driver-deployer
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-driver-deployer
spec:
initContainers:
- name: wait-longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-driver-deployer
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- -d
- deploy-driver
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --manager-url
- http://longhorn-backend:9500/v1
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
{{- if .Values.csi.kubeletRootDir }}
- name: KUBELET_ROOT_DIR
value: {{ .Values.csi.kubeletRootDir }}
{{- end }}
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
- name: CSI_ATTACHER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
{{- end }}
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
- name: CSI_PROVISIONER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
{{- end }}
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
{{- end }}
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
- name: CSI_RESIZER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
{{- end }}
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
- name: CSI_SNAPSHOTTER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
{{- end }}
{{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
- name: CSI_LIVENESS_PROBE_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
{{- end }}
{{- if .Values.csi.attacherReplicaCount }}
- name: CSI_ATTACHER_REPLICA_COUNT
value: {{ .Values.csi.attacherReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.provisionerReplicaCount }}
- name: CSI_PROVISIONER_REPLICA_COUNT
value: {{ .Values.csi.provisionerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.resizerReplicaCount }}
- name: CSI_RESIZER_REPLICA_COUNT
value: {{ .Values.csi.resizerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.snapshotterReplicaCount }}
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
{{- end }}
{{- if .Values.enableGoCoverDir }}
- name: GOCOVERDIR
value: /go-cover-dir/
volumeMounts:
- name: go-cover-dir
mountPath: /go-cover-dir/
{{- end }}
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
securityContext:
runAsUser: 0
{{- if .Values.enableGoCoverDir }}
volumes:
- name: go-cover-dir
hostPath:
path: /go-cover-dir/
type: DirectoryOrCreate
{{- end }}
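A sketch of the CSI-related values consumed by this deployer (the path and replica counts are examples; kubeletRootDir only needs to be set when the kubelet root differs from the cluster default):

  csi:
    kubeletRootDir: "/var/lib/kubelet"
    attacherReplicaCount: 3
    provisionerReplicaCount: 3
    resizerReplicaCount: 3
    snapshotterReplicaCount: 3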

View File

@ -0,0 +1,182 @@
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
# https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml
# Create a proxy service account and ensure it will use the route "proxy"
# Create a secure connection to the proxy via a route
apiVersion: route.openshift.io/v1
kind: Route
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: {{ .Values.openshift.ui.route }}
namespace: {{ include "release_namespace" . }}
spec:
to:
kind: Service
name: longhorn-ui
tls:
termination: reencrypt
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
annotations:
service.alpha.openshift.io/serving-cert-secret-name: longhorn-ui-tls
spec:
ports:
- name: longhorn-ui
port: {{ .Values.openshift.ui.port | default 443 }}
targetPort: {{ .Values.openshift.ui.proxy | default 8443 }}
selector:
app: longhorn-ui
---
{{- end }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
spec:
replicas: {{ .Values.longhornUI.replicas }}
selector:
matchLabels:
app: longhorn-ui
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-ui
spec:
serviceAccountName: longhorn-ui-service-account
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- longhorn-ui
topologyKey: kubernetes.io/hostname
containers:
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
- name: oauth-proxy
image: {{ template "registry_url" . }}{{ .Values.image.openshift.oauthProxy.repository }}:{{ .Values.image.openshift.oauthProxy.tag }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: {{ .Values.openshift.ui.proxy | default 8443 }}
name: public
args:
- --https-address=:{{ .Values.openshift.ui.proxy | default 8443 }}
- --provider=openshift
- --openshift-service-account=longhorn-ui-service-account
- --upstream=http://localhost:8000
- --tls-cert=/etc/tls/private/tls.crt
- --tls-key=/etc/tls/private/tls.key
- --cookie-secret=SECRET
- --openshift-sar={"namespace":"{{ include "release_namespace" . }}","group":"longhorn.io","resource":"setting","verb":"delete"}
volumeMounts:
- mountPath: /etc/tls/private
name: longhorn-ui-tls
{{- end }}
{{- end }}
- name: longhorn-ui
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: nginx-cache
mountPath: /var/cache/nginx/
- name: nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
ports:
- containerPort: 8000
name: http
env:
- name: LONGHORN_MANAGER_IP
value: "http://longhorn-backend:9500"
- name: LONGHORN_UI_PORT
value: "8000"
volumes:
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
- name: longhorn-ui-tls
secret:
secretName: longhorn-ui-tls
{{- end }}
{{- end }}
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornUI.priorityClass }}
priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornUI.tolerations }}
{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornUI.nodeSelector }}
{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
---
kind: Service
apiVersion: v1
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
kubernetes.io/cluster-service: "true"
{{- end }}
name: longhorn-frontend
namespace: {{ include "release_namespace" . }}
spec:
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
type: ClusterIP
{{- else }}
type: {{ .Values.service.ui.type }}
{{- end }}
{{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
selector:
app: longhorn-ui
ports:
- name: http
port: 80
targetPort: http
{{- if .Values.service.ui.nodePort }}
nodePort: {{ .Values.service.ui.nodePort }}
{{- else }}
nodePort: null
{{- end }}
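As an example, the longhorn-frontend Service above could be exposed directly with a NodePort (the port number is arbitrary within the NodePort range):

  longhornUI:
    replicas: 2
  service:
    ui:
      type: NodePort
      nodePort: 30080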

View File

@ -0,0 +1,37 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-ingress
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ingress
annotations:
{{- if .Values.ingress.secureBackends }}
ingress.kubernetes.io/secure-backends: "true"
{{- end }}
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ default "" .Values.ingress.path }}
pathType: ImplementationSpecific
backend:
service:
name: longhorn-frontend
port:
number: 80
{{- if .Values.ingress.tls }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Values.ingress.tlsSecret }}
{{- end }}
{{- end }}
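A minimal values sketch that enables this Ingress (hostname, class name, and secret name are placeholders):

  ingress:
    enabled: true
    ingressClassName: "nginx"
    host: "longhorn.example.com"
    path: "/"
    tls: true
    tlsSecret: "longhorn-tls"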

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-data-source
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: instance-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: instance-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
app: longhorn-ui
- podSelector:
matchLabels:
app: longhorn-csi-plugin
- podSelector:
matchLabels:
longhorn.io/managed-by: longhorn-manager
matchExpressions:
- { key: recurring-job.longhorn.io, operator: Exists }
- podSelector:
matchExpressions:
- { key: longhorn.io/job-task, operator: Exists }
- podSelector:
matchLabels:
app: longhorn-driver-deployer
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-recovery-backend
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9503
{{- end }}

View File

@ -0,0 +1,46 @@
{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-ui-frontend
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-ui
policyTypes:
- Ingress
ingress:
- from:
{{- if eq .Values.networkPolicies.type "rke1"}}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: ingress-nginx
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
{{- else if eq .Values.networkPolicies.type "rke2" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: rke2-ingress-nginx
app.kubernetes.io/name: rke2-ingress-nginx
{{- else if eq .Values.networkPolicies.type "k3s" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ports:
- port: 8000
protocol: TCP
- port: 80
protocol: TCP
{{- end }}
{{- end }}

View File

@ -0,0 +1,33 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-conversion-webhook
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9501
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-admission-webhook
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9502
{{- end }}

View File

@ -0,0 +1,56 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
name: longhorn-post-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-post-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-post-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- post-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,55 @@
{{- if and .Values.preUpgradeChecker.jobEnabled .Values.preUpgradeChecker.upgradeVersionCheck }}
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
name: longhorn-pre-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-pre-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-pre-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- pre-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,9 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: "longhorn-critical"
labels: {{- include "longhorn.labels" . | nindent 4 }}
description: "Ensure Longhorn pods have the highest priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure"
globalDefault: false
preemptionPolicy: PreemptLowerPriority
value: 1000000000
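The class defined above takes effect only where it is referenced from the values; a hedged example that applies it to both user-deployed and system-managed components:

  defaultSettings:
    priorityClass: "longhorn-critical"
  longhornManager:
    priorityClass: "longhorn-critical"
  longhornDriver:
    priorityClass: "longhorn-critical"
  longhornUI:
    priorityClass: "longhorn-critical"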

View File

@ -0,0 +1,66 @@
{{- if .Values.enablePSP }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: longhorn-psp
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
privileged: true
allowPrivilegeEscalation: true
requiredDropCapabilities:
- NET_RAW
allowedCapabilities:
- SYS_ADMIN
hostNetwork: false
hostIPC: false
hostPID: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
fsGroup:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- secret
- projected
- hostPath
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: longhorn-psp-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- longhorn-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: longhorn-psp-binding
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: longhorn-psp-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: default
namespace: {{ include "release_namespace" . }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if .Values.privateRegistry.createSecret }}
{{- if .Values.privateRegistry.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.privateRegistry.registrySecret }}
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ template "secret" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,40 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-ui-service-account
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
{{- if not .Values.serviceAccount.annotations }}
annotations:
{{- end }}
serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"longhorn-ui"}}'
{{- end }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if .Values.metrics.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: longhorn-prometheus-servicemonitor
namespace: {{ include "release_namespace" . }}
labels:
{{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-prometheus-servicemonitor
spec:
selector:
matchLabels:
app: longhorn-manager
namespaceSelector:
matchNames:
- {{ include "release_namespace" . }}
endpoints:
- port: manager
{{- end }}
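Enabling this ServiceMonitor assumes the Prometheus Operator CRDs (monitoring.coreos.com/v1) are already installed in the cluster; the corresponding toggle is:

  metrics:
    serviceMonitor:
      enabled: true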

View File

@ -0,0 +1,71 @@
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-conversion-webhook
name: longhorn-conversion-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: conversion-webhook
port: 9501
targetPort: conversion-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-admission-webhook
name: longhorn-admission-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: admission-webhook
port: 9502
targetPort: admission-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-recovery-backend
name: longhorn-recovery-backend
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: recovery-backend
port: 9503
targetPort: recov-backend
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-engine-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: engine
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-replica-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: replica

View File

@ -0,0 +1,50 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-storageclass
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
storageclass.yaml: |
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn
annotations:
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
provisioner: driver.longhorn.io
allowVolumeExpansion: true
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
staleReplicaTimeout: "30"
fromBackup: ""
{{- if .Values.persistence.defaultFsType }}
fsType: "{{ .Values.persistence.defaultFsType }}"
{{- end }}
{{- if .Values.persistence.defaultMkfsParams }}
mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}"
{{- end }}
{{- if .Values.persistence.migratable }}
migratable: "{{ .Values.persistence.migratable }}"
{{- end }}
{{- if .Values.persistence.nfsOptions }}
nfsOptions: "{{ .Values.persistence.nfsOptions }}"
{{- end }}
{{- if .Values.persistence.backingImage.enable }}
backingImage: {{ .Values.persistence.backingImage.name }}
backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
{{- end }}
{{- if .Values.persistence.recurringJobSelector.enable }}
recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
{{- end }}
dataLocality: {{ .Values.persistence.defaultDataLocality | quote }}
{{- if .Values.persistence.defaultNodeSelector.enable }}
nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}"
{{- end }}
{{- if .Values.persistence.removeSnapshotsDuringFilesystemTrim }}
unmapMarkSnapChainRemoved: "{{ .Values.persistence.removeSnapshotsDuringFilesystemTrim }}"
{{- end }}
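Once the generated "longhorn" StorageClass exists, workloads can claim volumes from it; a minimal PersistentVolumeClaim sketch (name and size are arbitrary):

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: longhorn-demo-pvc
  spec:
    accessModes:
      - ReadWriteOnce
    storageClassName: longhorn
    resources:
      requests:
        storage: 2Gi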

View File

@ -0,0 +1,16 @@
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
namespace: {{ include "release_namespace" $ }}
labels: {{- include "longhorn.labels" $ | nindent 4 }}
app: longhorn
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,57 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
name: longhorn-uninstall
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-uninstall
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-uninstall
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- uninstall
- --force
env:
- name: LONGHORN_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: Never
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,53 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-admin"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "longhorn-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ "longhorn.io" ]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status"]
verbs: [ "get", "list", "watch" ]

View File

@ -0,0 +1,35 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageDataSource" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Backup" false -}}
# {{- set $found "longhorn.io/v1beta2/BackupBackingImage" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupTarget" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupVolume" false -}}
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
# {{- set $found "longhorn.io/v1beta2/Orphan" false -}}
# {{- set $found "longhorn.io/v1beta1/RecurringJob" false -}}
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
# {{- set $found "longhorn.io/v1beta2/Snapshot" false -}}
# {{- set $found "longhorn.io/v1beta2/SupportBundle" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemBackup" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemRestore" false -}}
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
# {{- set $found "longhorn.io/v1beta2/VolumeAttachment" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.enablePSP }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,484 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
# -- Default system registry.
systemDefaultRegistry: ""
windowsCluster:
# -- Setting that allows Longhorn to run on a Rancher Windows cluster.
enabled: false
# -- Toleration for Linux nodes that can run user-deployed Longhorn components.
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# -- Node selector for Linux nodes that can run user-deployed Longhorn components.
nodeSelector:
kubernetes.io/os: "linux"
defaultSetting:
# -- Toleration for system-managed Longhorn components.
taintToleration: cattle.io/os=linux:NoSchedule
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
networkPolicies:
# -- Setting that allows you to enable network policies that control access to Longhorn pods.
enabled: false
# -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
type: "k3s"
image:
longhorn:
engine:
# -- Repository for the Longhorn Engine image.
repository: rancher/mirrored-longhornio-longhorn-engine
# -- Specify Longhorn engine image tag
tag: v1.6.2
manager:
# -- Repository for the Longhorn Manager image.
repository: rancher/mirrored-longhornio-longhorn-manager
# -- Specify Longhorn manager image tag
tag: v1.6.2
ui:
# -- Repository for the Longhorn UI image.
repository: rancher/mirrored-longhornio-longhorn-ui
# -- Specify Longhorn ui image tag
tag: v1.6.2
instanceManager:
# -- Repository for the Longhorn Instance Manager image.
repository: rancher/mirrored-longhornio-longhorn-instance-manager
# -- Specify Longhorn instance manager image tag
tag: v1.6.2
shareManager:
# -- Repository for the Longhorn Share Manager image.
repository: rancher/mirrored-longhornio-longhorn-share-manager
# -- Specify Longhorn share manager image tag
tag: v1.6.2
backingImageManager:
# -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-backing-image-manager
# -- Specify Longhorn backing image manager image tag
tag: v1.6.2
supportBundleKit:
# -- Repository for the Longhorn Support Bundle Manager image.
repository: rancher/mirrored-longhornio-support-bundle-kit
# -- Tag for the Longhorn Support Bundle Manager image.
tag: v0.0.37
csi:
attacher:
# -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-csi-attacher
# -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
tag: v4.5.1
provisioner:
# -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-csi-provisioner
# -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
tag: v3.6.4
nodeDriverRegistrar:
# -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
# -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
tag: v2.9.2
resizer:
# -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-csi-resizer
# -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
tag: v1.10.1
snapshotter:
# -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-csi-snapshotter
# -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
tag: v6.3.4
livenessProbe:
# -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
repository: rancher/mirrored-longhornio-livenessprobe
# -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
tag: v2.12.0
openshift:
oauthProxy:
# -- Repository for the OAuth Proxy image. This setting applies only to OpenShift users.
repository: rancher/mirrored-longhornio-openshift-origin-oauth-proxy
# -- Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14.
tag: 4.14
# -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
pullPolicy: IfNotPresent
service:
ui:
# -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
type: ClusterIP
# -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: null
manager:
# -- Service type for Longhorn Manager.
type: ClusterIP
# -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: ""
persistence:
# -- Setting that allows you to specify the default Longhorn StorageClass.
defaultClass: true
# -- Filesystem type of the default Longhorn StorageClass.
defaultFsType: ext4
# -- mkfs parameters of the default Longhorn StorageClass.
defaultMkfsParams: ""
# -- Replica count of the default Longhorn StorageClass.
defaultClassReplicaCount: 3
# -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
defaultDataLocality: disabled
# -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
reclaimPolicy: Delete
# -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
migratable: false
# -- Set NFS mount options for Longhorn StorageClass for RWX volumes
nfsOptions: ""
recurringJobSelector:
# -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
enable: false
# -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
jobList: []
backingImage:
# -- Setting that allows you to use a backing image in a Longhorn StorageClass.
enable: false
# -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
name: ~
# -- Data source type of a backing image used in a Longhorn StorageClass.
# If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
# If the backing image does not exist, Longhorn creates one using the specified data source type.
dataSourceType: ~
# -- Data source parameters of a backing image used in a Longhorn StorageClass.
# You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
dataSourceParameters: ~
# -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
expectedChecksum: ~
defaultNodeSelector:
# -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
enable: false
# -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
selector: ""
# -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
removeSnapshotsDuringFilesystemTrim: ignored
preUpgradeChecker:
# -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
jobEnabled: true
# -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
upgradeVersionCheck: true
csi:
# -- kubelet root directory. When unspecified, Longhorn uses the default value.
kubeletRootDir: ~
# -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
attacherReplicaCount: ~
# -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
provisionerReplicaCount: ~
# -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
resizerReplicaCount: ~
# -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
snapshotterReplicaCount: ~
defaultSettings:
# -- Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
backupTarget: ~
# -- Name of the Kubernetes secret associated with the backup target.
backupTargetCredentialSecret: ~
# -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
allowRecurringJobWhileVolumeDetached: ~
# -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
createDefaultDiskLabeledNodes: ~
# -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
defaultDataPath: ~
# -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
defaultDataLocality: ~
# -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
replicaSoftAntiAffinity: ~
# -- Setting that automatically rebalances replicas when an available node is discovered.
replicaAutoBalance: ~
# -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
storageOverProvisioningPercentage: ~
# -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
storageMinimalAvailablePercentage: ~
# -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
storageReservedPercentageForDefaultDisk: ~
# -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default
upgradeChecker: ~
# -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
defaultReplicaCount: ~
# -- Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static".
defaultLonghornStaticStorageClass: ~
# -- Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
backupstorePollInterval: ~
# -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
failedBackupTTL: ~
# -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
restoreVolumeRecurringJobs: ~
# -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
recurringSuccessfulJobsHistoryLimit: ~
# -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
recurringFailedJobsHistoryLimit: ~
# -- Maximum number of snapshots or backups to be retained.
recurringJobMaxRetention: ~
# -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
supportBundleFailedHistoryLimit: ~
# -- Taint or toleration for system-managed Longhorn components.
# Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
taintToleration: ~
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: ~
# -- PriorityClass for system-managed Longhorn components.
# This setting can help prevent Longhorn components from being evicted under Node Pressure.
# Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
# -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
autoSalvage: ~
# -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
# -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
disableSchedulingOnCordonedNode: ~
# -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
replicaZoneSoftAntiAffinity: ~
# -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
replicaDiskSoftAntiAffinity: ~
# -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
nodeDownPodDeletionPolicy: ~
# -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
nodeDrainPolicy: ~
# -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
detachManuallyAttachedVolumesWhenCordoned: ~
# -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
replicaReplenishmentWaitInterval: ~
# -- Maximum number of replicas that can be concurrently rebuilt on each node.
concurrentReplicaRebuildPerNodeLimit: ~
# -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
concurrentVolumeBackupRestorePerNodeLimit: ~
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
disableRevisionCounter: ~
# -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
systemManagedPodsImagePullPolicy: ~
# -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
allowVolumeCreationWithDegradedAvailability: ~
# -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
autoCleanupSystemGeneratedSnapshot: ~
# -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
autoCleanupRecurringJobBackupSnapshot: ~
# -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
concurrentAutomaticEngineUpgradePerNodeLimit: ~
# -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
backingImageCleanupWaitInterval: ~
# -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
backingImageRecoveryWaitInterval: ~
# -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
guaranteedInstanceManagerCPU: ~
# -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
kubernetesClusterAutoscalerEnabled: ~
# -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
orphanAutoDeletion: ~
# -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
storageNetwork: ~
# -- Flag that prevents accidental uninstallation of Longhorn.
deletingConfirmationFlag: ~
# -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
engineReplicaTimeout: ~
# -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
snapshotDataIntegrity: ~
# -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
# -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
snapshotDataIntegrityCronjob: ~
# -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
removeSnapshotsDuringFilesystemTrim: ~
# -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
fastReplicaRebuildEnabled: ~
# -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
replicaFileSyncHttpClientTimeout: ~
# -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
logLevel: ~
# -- Setting that allows you to specify a backup compression method.
backupCompressionMethod: ~
# -- Maximum number of worker threads that can concurrently run for each backup.
backupConcurrentLimit: ~
# -- Maximum number of worker threads that can concurrently run for each restore operation.
restoreConcurrentLimit: ~
# -- Setting that allows you to enable the V1 Data Engine.
v1DataEngine: ~
# -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments.
v2DataEngine: ~
# -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
v2DataEngineHugepageLimit: ~
# -- Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine.
offlineReplicaRebuilding: ~
# -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
v2DataEngineGuaranteedInstanceManagerCPU: ~
# -- Setting that allows scheduling of empty node selector volumes to any node.
allowEmptyNodeSelectorVolume: ~
# -- Setting that allows scheduling of empty disk selector volumes to any disk.
allowEmptyDiskSelectorVolume: ~
# -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
allowCollectingLonghornUsageMetrics: ~
# -- Setting that temporarily prevents all attempts to purge volume snapshots.
disableSnapshotPurge: ~
# -- Maximum snapshot count for a volume. The value should be between 2 and 250.
snapshotMaxCount: ~
privateRegistry:
# -- Setting that allows you to create a private registry secret.
createSecret: ~
# -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
registryUrl: ~
# -- User account used for authenticating with a private registry.
registryUser: ~
# -- Password for authenticating with a private registry.
registryPasswd: ~
# -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
registrySecret: ~
longhornManager:
log:
# -- Format of Longhorn Manager logs. (Options: "plain", "json")
format: plain
# -- PriorityClass for Longhorn Manager.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager.
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
# -- Annotation for the Longhorn Manager service.
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
# annotation-key1: "annotation-value1"
# annotation-key2: "annotation-value2"
longhornDriver:
# -- PriorityClass for Longhorn Driver.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
# -- Replica count for Longhorn UI.
replicas: 2
# -- PriorityClass for Longhorn UI.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
ingress:
# -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
enabled: false
# -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
# ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
ingressClassName: ~
# -- Hostname of the Layer 7 load balancer.
host: sslip.io
# -- Setting that allows you to enable TLS on ingress records.
tls: false
# -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
secureBackends: false
# -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
tlsSecret: longhorn.local-tls
# -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}.
path: /
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
# -- Ingress annotations in the form of key-value pairs.
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true
# -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: longhorn.local-tls
# key:
# certificate:
# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
enablePSP: false
# -- Namespace override. This is useful when Longhorn is installed as a sub-chart and its release namespace is not `longhorn-system`.
namespaceOverride: ""
# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
annotations: {}
serviceAccount:
# -- Annotations to add to the service account
annotations: {}
metrics:
serviceMonitor:
# -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
enabled: false
## openshift settings
openshift:
# -- Setting that allows Longhorn to integrate with OpenShift.
enabled: false
ui:
# -- Route for connections between Longhorn and the OpenShift web console.
route: "longhorn-ui"
# -- Port for accessing the OpenShift web console.
port: 443
# -- Port for proxy that provides access to the OpenShift web console.
proxy: 8443
# -- Setting that allows Longhorn to generate code coverage profiles.
enableGoCoverDir: false

View File

@ -2980,6 +2980,50 @@ entries:
- assets/harvester-csi-driver/harvester-csi-driver-100.0.0+up0.1.9.tgz
version: 100.0.0+up0.1.9
longhorn:
- annotations:
catalog.cattle.io/auto-install: longhorn-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Longhorn
catalog.cattle.io/kube-version: '>= 1.21.0-0'
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: longhorn
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.6.2
apiVersion: v1
appVersion: v1.6.2
created: "2024-07-15T09:48:38.476932124+08:00"
description: Longhorn is a distributed block storage system for Kubernetes.
digest: f3df8f83ead05a4b233b42e2bbb93fef4f9e076589200d70b637448149dbcefc
home: https://github.com/longhorn/longhorn
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
keywords:
- longhorn
- storage
- distributed
- block
- device
- iscsi
- nfs
kubeVersion: '>=1.21.0-0'
maintainers:
- email: maintainers@longhorn.io
name: Longhorn maintainers
name: longhorn
sources:
- https://github.com/longhorn/longhorn
- https://github.com/longhorn/longhorn-engine
- https://github.com/longhorn/longhorn-instance-manager
- https://github.com/longhorn/longhorn-share-manager
- https://github.com/longhorn/longhorn-manager
- https://github.com/longhorn/longhorn-ui
- https://github.com/longhorn/longhorn-tests
- https://github.com/longhorn/backing-image-manager
urls:
- assets/longhorn/longhorn-104.1.0+up1.6.2.tgz
version: 104.1.0+up1.6.2
- annotations:
catalog.cattle.io/auto-install: longhorn-crd=match
catalog.cattle.io/certified: rancher
@ -5053,6 +5097,21 @@ entries:
- assets/longhorn/longhorn-1.0.200.tgz
version: 1.0.200
longhorn-crd:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: longhorn-system
catalog.cattle.io/release-name: longhorn-crd
apiVersion: v1
appVersion: v1.6.2
created: "2024-07-15T09:52:38.022964455+08:00"
description: Installs the CRDs for longhorn.
digest: 5b1ce5b9443c9ec70feefe759861227239bf2749f2ca14dbec0d78fb73c28733
name: longhorn-crd
type: application
urls:
- assets/longhorn-crd/longhorn-crd-104.1.0+up1.6.2.tgz
version: 104.1.0+up1.6.2
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,239 @@
# Longhorn Chart
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/).
## Source Code
Longhorn is 100% open source software. Project source code is spread across a number of repos:
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
## Prerequisites
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes >= v1.21
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is the recommended guest OS image because it already contains `open-iscsi`.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
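For illustration only, one common approach is to label the chart's release namespace with the Pod Security Admission levels you have decided on. The sketch below assumes the chart is installed in `longhorn-system` and that the `privileged` Pod Security Standard is acceptable for that namespace; consult the Longhorn docs for the levels that match your security requirements.
```yaml
# Hypothetical namespace manifest: the chosen "privileged" level is an example,
# not a recommendation for every environment. Adjust before applying.
apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/warn: privileged
```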
## Installation
1. Add Longhorn chart repository.
```
helm repo add longhorn https://charts.longhorn.io
```
2. Update local Longhorn chart information from chart repository.
```
helm repo update
```
3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart.
```
kubectl create namespace longhorn-system
helm install longhorn longhorn/longhorn --namespace longhorn-system
```
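To customize the deployment, you can also pass your own values file to `helm install` (or `helm upgrade`) with `-f`. The sketch below is illustrative only; the keys come from this chart's `values.yaml`, but the values themselves are examples rather than recommendations.
```yaml
# custom-values.yaml -- hypothetical overrides passed via `helm install -f custom-values.yaml`
persistence:
  defaultClassReplicaCount: 2   # replica count of the default Longhorn StorageClass
  reclaimPolicy: Retain         # keep the volume after its claim is released
defaultSettings:
  backupstorePollInterval: 600  # seconds between backupstore polls; "0" disables polling
ingress:
  enabled: true
  host: longhorn.example.com    # placeholder hostname for the Longhorn UI ingress
```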
## Uninstallation
```
kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
helm uninstall longhorn -n longhorn-system
kubectl delete namespace longhorn-system
```
## Values
The `values.yaml` contains items used to tweak a deployment of this chart.
### Cattle Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "global" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Network Policies
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "networkPolicies" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Image Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "image" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Service Settings
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if (and (hasPrefix "service" .Key) (not (contains "Account" .Key))) }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### StorageClass Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "persistence" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### CSI Settings
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "csi" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn Manager Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager.
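As a sketch of how these keys are typically combined (the taint key and node label below are placeholders, not values defined by the chart):
```yaml
# Illustrative Longhorn Manager overrides; adjust or remove to match your cluster.
longhornManager:
  log:
    format: json                                # "plain" or "json"
  tolerations:
    - key: "node-role.kubernetes.io/storage"    # hypothetical taint on storage nodes
      operator: "Exists"
      effect: "NoSchedule"
  nodeSelector:
    storage-node: "true"                        # hypothetical node label
```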
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornManager" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn Driver Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornDriver" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn UI Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornUI" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Ingress Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "ingress" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Private Registry Settings
You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs).
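For illustration, the private registry keys documented in the table below can be combined like this (the registry URL and credentials are placeholders):
```yaml
# Hypothetical private registry configuration; replace every placeholder value.
privateRegistry:
  createSecret: true                        # let the chart create the image pull secret
  registryUrl: registry.example.com         # placeholder private registry URL
  registryUser: example-user
  registryPasswd: example-password
  registrySecret: example-registry-secret   # pull secret name; include the registry name in it
```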
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "privateRegistry" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### OS/Kubernetes Distro Settings
#### OpenShift Settings
For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md).
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "openshift" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Other Settings
| Key | Default | Description |
|-----|---------|-------------|
{{- range .Values }}
{{- if not (or (hasPrefix "defaultSettings" .Key)
(hasPrefix "networkPolicies" .Key)
(hasPrefix "image" .Key)
(hasPrefix "service" .Key)
(hasPrefix "persistence" .Key)
(hasPrefix "csi" .Key)
(hasPrefix "longhornManager" .Key)
(hasPrefix "longhornDriver" .Key)
(hasPrefix "longhornUI" .Key)
(hasPrefix "privateRegistry" .Key)
(hasPrefix "ingress" .Key)
(hasPrefix "openshift" .Key)
(hasPrefix "global" .Key)) }}
| {{ .Key }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### System Default Settings
During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs).
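For example, a values fragment that overrides a few of these defaults at install time might look like the following sketch (keys are taken from `defaultSettings` in this chart's `values.yaml`; the values are illustrative only):
```yaml
# Illustrative defaultSettings overrides applied at install or upgrade time.
defaultSettings:
  defaultReplicaCount: 2                          # default replica count for volumes created via the UI
  storageMinimalAvailablePercentage: 15           # minimum available disk capacity, in percent
  taintToleration: "storage=longhorn:NoSchedule"  # toleration for system-managed components (key=value:effect)
```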
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "defaultSettings" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

View File

@ -0,0 +1,11 @@
# Longhorn
Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)

View File

@ -0,0 +1,177 @@
# OpenShift / OKD Extra Configuration Steps
- [OpenShift / OKD Extra Configuration Steps](#openshift--okd-extra-configuration-steps)
- [Notes](#notes)
- [Known Issues](#known-issues)
- [Preparing Nodes (Optional)](#preparing-nodes-optional)
- [Default /var/lib/longhorn setup](#default-varliblonghorn-setup)
- [Separate /var/mnt/longhorn setup](#separate-varmntlonghorn-setup)
- [Create Filesystem](#create-filesystem)
- [Mounting Disk On Boot](#mounting-disk-on-boot)
- [Label and Annotate Nodes](#label-and-annotate-nodes)
- [Example values.yaml](#example-valuesyaml)
- [Installation](#installation)
- [Refs](#refs)
## Notes
Main changes and tasks for OCP are:
- On OCP / OKD, the Operating System is Managed by the Cluster
- OCP Imposes [Security Context Constraints](https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html)
- This requires everything to run with the least privilege possible. For the moment, every component has been given access to run with higher privileges.
- Something to circle back on is network policies and which components can have their privileges reduced without impacting functionality.
- The UI, for example, probably can be.
- openshift/oauth-proxy for authentication to the Longhorn UI
- **⚠️** Currently Scoped to Authenticated Users that can delete a longhorn settings object.
- **⚠️** Since the UI itself is not protected, network policies will need to be created to prevent namespace <--> namespace communication against the pod or service object directly.
- Anyone with access to the UI Deployment can remove the route restriction. (Namespace Scoped Admin)
- Option to use separate disk in /var/mnt/longhorn & MachineConfig file to mount /var/mnt/longhorn
- Adding finalizers for mount propagation
## Known Issues
- General Feature/Issue Thread
- [[FEATURE] Deploying Longhorn on OKD/Openshift](https://github.com/longhorn/longhorn/issues/1831)
- 4.10 / 1.23:
- 4.10.0-0.okd-2022-03-07-131213 to 4.10.0-0.okd-2022-07-09-073606
- Tested, No Known Issues
- 4.11 / 1.24:
- 4.11.0-0.okd-2022-07-27-052000 to 4.11.0-0.okd-2022-11-19-050030
- Tested, No Known Issues
- 4.11.0-0.okd-2022-12-02-145640, 4.11.0-0.okd-2023-01-14-152430:
- Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988)
- [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772)
- 4.12 / 1.25:
- 4.12.0-0.okd-2022-12-05-210624 to 4.12.0-0.okd-2023-01-20-101927
- Tested, No Known Issues
- 4.12.0-0.okd-2023-01-21-055900 to 4.12.0-0.okd-2023-02-18-033438:
- Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988)
- [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772)
- 4.12.0-0.okd-2023-03-05-022504 - 4.12.0-0.okd-2023-04-16-041331:
- Tested, No Known Issues
- 4.13 / 1.26:
- 4.13.0-0.okd-2023-05-03-001308 - 4.13.0-0.okd-2023-08-18-135805:
- Tested, No Known Issues
- 4.14 / 1.27:
- 4.14.0-0.okd-2023-08-12-022330 - 4.14.0-0.okd-2023-10-28-073550:
- Tested, No Known Issues
## Preparing Nodes (Optional)
Only required if you require additional customizations, such as storage-less nodes, or secondary disks.
### Default /var/lib/longhorn setup
Label each node for storage with:
```bash
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
oc label node "${NODE}" node.longhorn.io/create-default-disk=true
```
### Separate /var/mnt/longhorn setup
#### Create Filesystem
On the storage nodes create a filesystem with the label longhorn:
```bash
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
oc debug node/${NODE} -t -- chroot /host bash
# Validate Target Drive is Present
lsblk
export DRIVE="sdb" #vdb
sudo mkfs.ext4 -L longhorn /dev/${DRIVE}
```
> ⚠️ Note: If you add new nodes after the MachineConfig below is applied, you will also need to reboot those nodes.
#### Mounting Disk On Boot
The secondary drive needs to be mounted on every boot. Save the contents and apply the MachineConfig with `oc apply -f`:
> ⚠️ This will trigger a machine config profile update and reboot all worker nodes on the cluster.
```yaml
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
labels:
machineconfiguration.openshift.io/role: worker
name: 71-mount-storage-worker
spec:
config:
ignition:
version: 3.2.0
systemd:
units:
- name: var-mnt-longhorn.mount
enabled: true
contents: |
[Unit]
Before=local-fs.target
[Mount]
Where=/var/mnt/longhorn
What=/dev/disk/by-label/longhorn
Options=rw,relatime,discard
[Install]
WantedBy=local-fs.target
```
#### Label and Annotate Nodes
Label and annotate storage nodes like this:
```bash
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
oc annotate node ${NODE} --overwrite node.longhorn.io/default-disks-config='[{"path":"/var/mnt/longhorn","allowScheduling":true}]'
oc label node ${NODE} node.longhorn.io/create-default-disk=config
```
## Example values.yaml
Minimum Adjustments Required
```yaml
image:
  openshift:
    oauthProxy:
      repository: quay.io/openshift/origin-oauth-proxy
      tag: 4.14 # Use Your OCP/OKD 4.X Version, Current Stable is 4.14
# defaultSettings: # Preparing nodes (Optional)
#   createDefaultDiskLabeledNodes: true
openshift:
  enabled: true
  ui:
    route: "longhorn-ui"
    port: 443
    proxy: 8443
```
## Installation
```bash
# helm template ./chart/ --namespace longhorn-system --values ./chart/values.yaml --no-hooks > longhorn.yaml # Local Testing
helm template longhorn --namespace longhorn-system --values values.yaml --no-hooks > longhorn.yaml
oc create namespace longhorn-system -o yaml --dry-run=client | oc apply -f -
oc apply -f longhorn.yaml -n longhorn-system
```
## Refs
- <https://docs.openshift.com/container-platform/4.11/storage/persistent_storage/persistent-storage-iscsi.html>
- <https://docs.okd.io/4.11/storage/persistent_storage/persistent-storage-iscsi.html>
- okd 4.5: <https://github.com/longhorn/longhorn/issues/1831#issuecomment-702690613>
- okd 4.6: <https://github.com/longhorn/longhorn/issues/1831#issuecomment-765884631>
- oauth-proxy: <https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml>
- <https://github.com/longhorn/longhorn/issues/1831>

View File

@ -0,0 +1,920 @@
categories:
- storage
namespace: longhorn-system
questions:
- variable: image.defaultImage
default: "true"
description: "Use default Longhorn images"
label: Use Default Images
type: boolean
show_subquestion_if: false
group: "Longhorn Images"
subquestions:
- variable: image.longhorn.manager.repository
default: longhornio/longhorn-manager
description: "Repository for the Longhorn Manager image."
type: string
label: Longhorn Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.manager.tag
default: v1.6.2
description: "Specify Longhorn Manager Image Tag"
type: string
label: Longhorn Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.repository
default: longhornio/longhorn-engine
description: "Repository for the Longhorn Engine image."
type: string
label: Longhorn Engine Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.tag
default: v1.6.2
description: "Specify Longhorn Engine Image Tag"
type: string
label: Longhorn Engine Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.repository
default: longhornio/longhorn-ui
description: "Repository for the Longhorn UI image."
type: string
label: Longhorn UI Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.tag
default: v1.6.2
description: "Specify Longhorn UI Image Tag"
type: string
label: Longhorn UI Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.repository
default: longhornio/longhorn-instance-manager
description: "Repository for the Longhorn Instance Manager image."
type: string
label: Longhorn Instance Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.tag
default: v1.6.2
description: "Specify Longhorn Instance Manager Image Tag"
type: string
label: Longhorn Instance Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.repository
default: longhornio/longhorn-share-manager
description: "Repository for the Longhorn Share Manager image."
type: string
label: Longhorn Share Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.tag
default: v1.6.2
description: "Specify Longhorn Share Manager Image Tag"
type: string
label: Longhorn Share Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.repository
default: longhornio/backing-image-manager
description: "Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn Backing Image Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.tag
default: v1.6.2
description: "Specify Longhorn Backing Image Manager Image Tag"
type: string
label: Longhorn Backing Image Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.repository
default: longhornio/support-bundle-kit
description: "Repository for the Longhorn Support Bundle Manager image."
type: string
label: Longhorn Support Bundle Kit Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.tag
default: v0.0.37
description: "Tag for the Longhorn Support Bundle Manager image."
type: string
label: Longhorn Support Bundle Kit Image Tag
group: "Longhorn Images Settings"
- variable: image.csi.attacher.repository
default: longhornio/csi-attacher
description: "Repository for the CSI attacher image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Attacher Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.attacher.tag
default: v4.5.1
description: "Tag for the CSI attacher image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Attacher Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.repository
default: longhornio/csi-provisioner
description: "Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Provisioner Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.tag
default: v3.6.4
description: "Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Provisioner Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.repository
default: longhornio/csi-node-driver-registrar
description: "Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Node Driver Registrar Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.tag
default: v2.9.2
description: "Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Node Driver Registrar Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.repository
default: longhornio/csi-resizer
description: "Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Resizer Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.tag
default: v1.10.1
description: "Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Resizer Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.repository
default: longhornio/csi-snapshotter
description: "Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Snapshotter Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.tag
default: v6.3.4
description: "Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Snapshotter Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.repository
default: longhornio/livenessprobe
description: "Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Liveness Probe Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.tag
default: v2.12.0
description: "Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Liveness Probe Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.openshift.oauthProxy.repository
default: longhornio/openshift-origin-oauth-proxy
description: "Repository for the OAuth Proxy image. This setting applies only to OpenShift users"
type: string
label: OpenShift OAuth Proxy Image Repository
group: "OpenShift Images"
- variable: image.openshift.oauthProxy.tag
default: 4.14
description: "Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later."
type: string
label: OpenShift OAuth Proxy Image Tag
group: "OpenShift Images"
- variable: privateRegistry.registryUrl
label: Private registry URL
description: "URL of a private registry. When unspecified, Longhorn uses the default system registry."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.registrySecret
label: Private registry secret name
description: "Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name."
group: "Private Registry Settings"
type: string
default: ""
- variable: privateRegistry.createSecret
default: "true"
description: "Setting that allows you to create a private registry secret."
type: boolean
group: "Private Registry Settings"
label: Create Secret for Private Registry Settings
show_subquestion_if: true
subquestions:
- variable: privateRegistry.registryUser
label: Private registry user
description: "User account used for authenticating with a private registry."
type: string
default: ""
- variable: privateRegistry.registryPasswd
label: Private registry password
description: "Password for authenticating with a private registry."
type: password
default: ""
- variable: longhorn.default_setting
default: "false"
description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
label: "Customize Default Settings"
type: boolean
show_subquestion_if: true
group: "Longhorn Default Settings"
subquestions:
- variable: csi.kubeletRootDir
default:
description: "kubelet root directory. When unspecified, Longhorn uses the default value."
type: string
label: Kubelet Root Directory
group: "Longhorn CSI Driver Settings"
- variable: csi.attacherReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Attacher replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.provisionerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Provisioner replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.resizerReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Resizer replica count
group: "Longhorn CSI Driver Settings"
- variable: csi.snapshotterReplicaCount
type: int
default: 3
min: 1
max: 10
description: "Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value (\"3\")."
label: Longhorn CSI Snapshotter replica count
group: "Longhorn CSI Driver Settings"
- variable: defaultSettings.backupTarget
label: Backup Target
description: "Endpoint used to access the backupstore. (Options: \"NFS\", \"CIFS\", \"AWS\", \"GCP\", \"AZURE\")"
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.backupTargetCredentialSecret
label: Backup Target Credential Secret
description: "Name of the Kubernetes secret associated with the backup target."
group: "Longhorn Default Settings"
type: string
default:
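# Illustrative sketch (not part of the chart): example answers for the two backup settings
# above. The endpoint and secret name are placeholders; the endpoint format depends on the
# chosen backupstore type (NFS, CIFS, AWS, GCP, or AZURE).
#
#   defaultSettings:
#     backupTarget: "s3://backup-bucket@us-east-1/longhorn"
#     backupTargetCredentialSecret: "aws-secret"
#
# The named secret holds the credentials Longhorn uses to reach the backupstore endpoint.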
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
label: Allow Recurring Job While Volume Is Detached
description: 'Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.snapshotMaxCount
label: Snapshot Maximum Count
description: 'Maximum snapshot count for a volume. The value should be between 2 and 250.'
group: "Longhorn Default Settings"
type: int
min: 2
max: 250
default: 250
- variable: defaultSettings.createDefaultDiskLabeledNodes
label: Create Default Disk on Labeled Nodes
description: 'Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.defaultDataPath
label: Default Data Path
description: 'Default path for storing data on a host. The default value is "/var/lib/longhorn/".'
group: "Longhorn Default Settings"
type: string
default: "/var/lib/longhorn/"
- variable: defaultSettings.defaultDataLocality
label: Default Data Locality
description: 'Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: defaultSettings.replicaSoftAntiAffinity
label: Replica Node Level Soft Anti-Affinity
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default, false.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.replicaAutoBalance
label: Replica Auto Balance
description: 'Setting that automatically rebalances replicas when an available node is discovered.'
group: "Longhorn Default Settings"
type: enum
options:
- "disabled"
- "least-effort"
- "best-effort"
default: "disabled"
- variable: defaultSettings.storageOverProvisioningPercentage
label: Storage Over Provisioning Percentage
description: "Percentage of storage that can be allocated relative to hard drive capacity. The default value is 100."
group: "Longhorn Default Settings"
type: int
min: 0
default: 100
- variable: defaultSettings.storageMinimalAvailablePercentage
label: Storage Minimal Available Percentage
description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default, 25."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 25
- variable: defaultSettings.storageReservedPercentageForDefaultDisk
label: Storage Reserved Percentage For Default Disk
description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 30
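# Worked example (illustrative, using the defaults above): on a node with a single 1000 GiB
# disk, a reserved percentage of 30 keeps 300 GiB out of Longhorn's allocation, a minimal
# available percentage of 25 marks the disk unschedulable once free space drops below
# 250 GiB, and an over-provisioning percentage of 100 caps the total scheduled replica size
# at roughly (1000 - 300) GiB x 100% = 700 GiB.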
- variable: defaultSettings.upgradeChecker
label: Enable Upgrade Checker
description: 'Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.defaultReplicaCount
label: Default Replica Count
description: "Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is \"3\"."
group: "Longhorn Default Settings"
type: int
min: 1
max: 20
default: 3
- variable: defaultSettings.defaultLonghornStaticStorageClass
label: Default Longhorn Static StorageClass Name
description: "Default Longhorn StorageClass. \"storageClassName\" is assigned to PVs and PVCs that are created for an existing Longhorn volume. \"storageClassName\" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is \"longhorn-static\"."
group: "Longhorn Default Settings"
type: string
default: "longhorn-static"
- variable: defaultSettings.backupstorePollInterval
label: Backupstore Poll Interval
description: "Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is \"300\". When the value is \"0\", polling is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.failedBackupTTL
label: Failed Backup Time to Live
description: "Number of minutes that Longhorn keeps a failed backup resource. When the value is \"0\", automatic deletion is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1440
- variable: defaultSettings.restoreVolumeRecurringJobs
label: Restore Volume Recurring Jobs
description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.recurringSuccessfulJobsHistoryLimit
label: Cronjob Successful Jobs History Limit
description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.recurringFailedJobsHistoryLimit
label: Cronjob Failed Jobs History Limit
description: 'Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.'
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.recurringJobMaxRetention
label: Maximum Retention Number for Recurring Job
description: "Maximum number of snapshots or backups to be retained."
group: "Longhorn Default Settings"
type: int
default: 100
- variable: defaultSettings.supportBundleFailedHistoryLimit
label: SupportBundle Failed History Limit
description: "This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1
- variable: defaultSettings.autoSalvage
label: Automatic salvage
description: "Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
description: 'Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.disableSchedulingOnCordonedNode
label: Disable Scheduling On Cordoned Node
description: "Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.replicaZoneSoftAntiAffinity
label: Replica Zone Level Soft Anti-Affinity
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By, default true."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.replicaDiskSoftAntiAffinity
label: Replica Disk Level Soft Anti-Affinity
description: 'Allow scheduling on disks with existing healthy replicas of the same volume. By default, true.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.allowEmptyNodeSelectorVolume
label: Allow Empty Node Selector Volume
description: "Setting that allows scheduling of empty node selector volumes to any node."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.allowEmptyDiskSelectorVolume
label: Allow Empty Disk Selector Volume
description: "Setting that allows scheduling of empty disk selector volumes to any disk."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.nodeDownPodDeletionPolicy
label: Pod Deletion Policy When Node is Down
description: "Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed."
group: "Longhorn Default Settings"
type: enum
options:
- "do-nothing"
- "delete-statefulset-pod"
- "delete-deployment-pod"
- "delete-both-statefulset-and-deployment-pod"
default: "do-nothing"
- variable: defaultSettings.nodeDrainPolicy
label: Node Drain Policy
description: "Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained."
group: "Longhorn Default Settings"
type: enum
options:
- "block-for-eviction"
- "block-for-eviction-if-contains-last-replica"
- "block-if-contains-last-replica"
- "allow-if-replica-is-stopped"
- "always-allow"
default: "block-if-contains-last-replica"
- variable: defaultSettings.detachManuallyAttachedVolumesWhenCordoned
label: Detach Manually Attached Volumes When Cordoned
description: "Setting that allows automatic detaching of manually-attached volumes when a node is cordoned."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.priorityClass
label: Priority Class
description: "PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Longhorn system contains user deployed components (E.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (E.g, instance manager, engine image, CSI driver, etc.) Note that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
group: "Longhorn Default Settings"
type: string
default: "longhorn-critical"
- variable: defaultSettings.replicaReplenishmentWaitInterval
label: Replica Replenishment Wait Interval
description: "The interval in seconds determines how long Longhorn will at least wait to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume."
group: "Longhorn Default Settings"
type: int
min: 0
default: 600
- variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
label: Concurrent Replica Rebuild Per Node Limit
description: "Maximum number of replicas that can be concurrently rebuilt on each node.
WARNING:
- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
label: Concurrent Volume Backup Restore Per Node Limit
description: "Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is \"0\", restoration of volumes using a backup is disabled."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.disableRevisionCounter
label: Disable Revision Counter
description: "Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the \"volume-head-xxx.img\" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.systemManagedPodsImagePullPolicy
label: System Managed Pod Image Pull Policy
description: "Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart."
group: "Longhorn Default Settings"
type: enum
options:
- "if-not-present"
- "always"
- "never"
default: "if-not-present"
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
label: Allow Volume Creation with Degraded Availability
description: "Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
label: Automatically Cleanup System Generated Snapshot
description: "Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.autoCleanupRecurringJobBackupSnapshot
label: Automatically Cleanup Recurring Job Backup Snapshot
description: "Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job."
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
label: Concurrent Automatic Engine Upgrade Per Node Limit
description: "Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is \"0\", Longhorn does not automatically upgrade volume engines to the new default engine image version."
group: "Longhorn Default Settings"
type: int
min: 0
default: 0
- variable: defaultSettings.backingImageCleanupWaitInterval
label: Backing Image Cleanup Wait Interval
description: "Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it."
group: "Longhorn Default Settings"
type: int
min: 0
default: 60
- variable: defaultSettings.backingImageRecoveryWaitInterval
label: Backing Image Recovery Wait Interval
description: "Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to \"failed\" or \"unknown\"."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.guaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU
description: "Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is \"12\".
WARNING:
- Value 0 means removing the CPU requests from spec of instance manager pods.
- Considering the possible number of new instance manager pods in a further system upgrade, this integer value ranges from 0 to 40.
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
- This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
- After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
group: "Longhorn Default Settings"
type: int
min: 0
max: 40
default: 12
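# Worked example (illustrative): with the default value of 12, a node with 4000m of
# allocatable CPU reserves about 4000m x 12% = 480m as the CPU request of each instance
# manager pod on that node. As noted in the warning above, the per-node field
# "InstanceManagerCPURequest" overrides this global percentage for that node.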
- variable: defaultSettings.logLevel
label: Log Level
description: 'Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")'
group: "Longhorn Default Settings"
type: string
default: "Info"
- variable: defaultSettings.disableSnapshotPurge
label: Disable Snapshot Purge
description: "Setting that temporarily prevents all attempts to purge volume snapshots."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
label: Kubernetes Cluster Autoscaler Enabled (Experimental)
description: "Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
WARNING:
- Replica rebuilding could be expensive because nodes with reusable replicas could get removed by the Kubernetes Cluster Autoscaler."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.orphanAutoDeletion
label: Orphaned Data Cleanup
description: "Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.storageNetwork
label: Storage Network
description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
WARNING:
- This setting should change after detaching all Longhorn volumes, as some of the Longhorn system component pods will get recreated to apply the setting. Longhorn will try to block this setting update when there are attached volumes."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.deletingConfirmationFlag
label: Deleting Confirmation Flag
description: "Flag that prevents accidental uninstallation of Longhorn."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.engineReplicaTimeout
label: Timeout between Engine and Replica
description: "Timeout between the Longhorn Engine and replicas. Specify a value between \"8\" and \"30\" seconds. The default value is \"8\"."
group: "Longhorn Default Settings"
type: int
default: "8"
- variable: defaultSettings.snapshotDataIntegrity
label: Snapshot Data Integrity
description: "This setting allows users to enable or disable snapshot hashing and data integrity checking."
group: "Longhorn Default Settings"
type: string
default: "disabled"
- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation
label: Immediate Snapshot Data Integrity Check After Creating a Snapshot
description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.snapshotDataIntegrityCronjob
label: Snapshot Data Integrity Check CronJob
description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files."
group: "Longhorn Default Settings"
type: string
default: "0 0 */7 * *"
- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
label: Remove Snapshots During Filesystem Trim
description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children."
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.fastReplicaRebuildEnabled
label: Fast Replica Rebuild Enabled
description: "Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to \"enable\" or \"fast-check\"."
group: "Longhorn Default Settings"
type: boolean
default: false
- variable: defaultSettings.replicaFileSyncHttpClientTimeout
label: Timeout of HTTP Client to Replica File Sync Server
description: "In seconds. The setting specifies the HTTP client timeout to the file sync server."
group: "Longhorn Default Settings"
type: int
default: "30"
- variable: defaultSettings.backupCompressionMethod
label: Backup Compression Method
description: "Setting that allows you to specify a backup compression method."
group: "Longhorn Default Settings"
type: string
default: "lz4"
- variable: defaultSettings.backupConcurrentLimit
label: Backup Concurrent Limit Per Backup
description: "Maximum number of worker threads that can concurrently run for each backup."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.restoreConcurrentLimit
label: Restore Concurrent Limit Per Backup
description: "This setting controls how many worker threads per restore concurrently."
group: "Longhorn Default Settings"
type: int
min: 1
default: 2
- variable: defaultSettings.allowCollectingLonghornUsageMetrics
label: Allow Collecting Longhorn Usage Metrics
description: "Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses."
group: "Longhorn Default Settings"
type: boolean
default: true
- variable: defaultSettings.v1DataEngine
label: V1 Data Engine
description: "Setting that allows you to enable the V1 Data Engine."
group: "Longhorn V1 Data Engine Settings"
type: boolean
default: true
- variable: defaultSettings.v2DataEngine
label: V2 Data Engine
description: "Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments.
WARNING:
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
- When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
type: boolean
default: false
- variable: defaultSettings.v2DataEngineHugepageLimit
label: V2 Data Engine Hugepage Limit
description: "Maximum huge page size (in MiB) for the V2 Data Engine."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
type: int
default: "2048"
- variable: defaultSettings.offlineReplicaRebuilding
label: Offline Replica Rebuilding
description: "Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine."
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
required: true
type: enum
options:
- "enabled"
- "disabled"
default: "enabled"
- variable: persistence.defaultClass
default: "true"
description: "Setting that allows you to specify the default Longhorn StorageClass."
label: Default Storage Class
group: "Longhorn Storage Class Settings"
required: true
type: boolean
- variable: persistence.reclaimPolicy
label: Storage Class Retain Policy
description: "Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: \"Retain\", \"Delete\")"
group: "Longhorn Storage Class Settings"
required: true
type: enum
options:
- "Delete"
- "Retain"
default: "Delete"
- variable: persistence.defaultClassReplicaCount
description: "Replica count of the default Longhorn StorageClass."
label: Default Storage Class Replica Count
group: "Longhorn Storage Class Settings"
type: int
min: 1
max: 10
default: 3
- variable: persistence.defaultDataLocality
description: "Data locality of the default Longhorn StorageClass. (Options: \"disabled\", \"best-effort\")"
label: Default Storage Class Data Locality
group: "Longhorn Storage Class Settings"
type: enum
options:
- "disabled"
- "best-effort"
default: "disabled"
- variable: persistence.recurringJobSelector.enable
description: "Setting that allows you to enable the recurring job selector for a Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Recurring Job Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.recurringJobSelector.jobList
description: 'Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)'
label: Storage Class Recurring Job Selector List
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.defaultNodeSelector.enable
description: "Setting that allows you to enable the node selector for the default Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Enable Storage Class Node Selector
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.defaultNodeSelector.selector
label: Storage Class Node Selector
description: 'Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")'
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.enable
description: "Setting that allows you to use a backing image in a Longhorn StorageClass."
group: "Longhorn Storage Class Settings"
label: Default Storage Class Backing Image
type: boolean
default: false
show_subquestion_if: true
subquestions:
- variable: persistence.backingImage.name
description: 'Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.'
label: Storage Class Backing Image Name
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.expectedChecksum
description: 'Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- It is not recommended to set this field if the data source type is \"export-from-volume\".'
label: Storage Class Backing Image Expected SHA512 Checksum
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.backingImage.dataSourceType
description: 'Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type.
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- For backing image creation with the data source type \"upload\", it is recommended to use the UI rather than the StorageClass, because uploading requires sending file data to the Longhorn backend after the object is created, which is difficult to handle manually.'
label: Storage Class Backing Image Data Source Type
group: "Longhorn Storage Class Settings"
type: enum
options:
- ""
- "download"
- "upload"
- "export-from-volume"
default: ""
- variable: persistence.backingImage.dataSourceParameters
description: "Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
WARNING:
- If the backing image name is not specified, setting this field is meaningless.
- Be careful of the quotes here."
label: Storage Class Backing Image Data Source Parameters
group: "Longhorn Storage Class Settings"
type: string
default:
- variable: persistence.removeSnapshotsDuringFilesystemTrim
description: "Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: \"ignored\", \"enabled\", \"disabled\")"
label: Default Storage Class Remove Snapshots During Filesystem Trim
group: "Longhorn Storage Class Settings"
type: enum
options:
- "ignored"
- "enabled"
- "disabled"
default: "ignored"
- variable: ingress.enabled
default: "false"
description: "Expose app using Layer 7 Load Balancer - ingress"
type: boolean
group: "Services and Load Balancing"
label: Expose app using Layer 7 Load Balancer
show_subquestion_if: true
subquestions:
- variable: ingress.host
default: "xip.io"
description: "Hostname of the Layer 7 load balancer."
type: hostname
required: true
label: Layer 7 Load Balancer Hostname
- variable: ingress.path
default: "/"
description: "Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}."
type: string
required: true
label: Ingress Path
- variable: service.ui.type
default: "Rancher-Proxy"
description: "Service type for Longhorn UI. (Options: \"ClusterIP\", \"NodePort\", \"LoadBalancer\", \"Rancher-Proxy\")"
type: enum
options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"
- "Rancher-Proxy"
label: Longhorn UI Service
show_if: "ingress.enabled=false"
group: "Services and Load Balancing"
show_subquestion_if: "NodePort"
subquestions:
- variable: service.ui.nodePort
default: ""
description: "NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767."
type: int
min: 30000
max: 32767
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
label: UI Service NodePort number
- variable: enablePSP
default: "false"
description: "Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled."
label: Pod Security Policy
type: boolean
group: "Other Settings"
- variable: global.cattle.windowsCluster.enabled
default: "false"
description: "Setting that allows Longhorn to run on a Rancher Windows cluster."
label: Rancher Windows Cluster
type: boolean
group: "Other Settings"
- variable: networkPolicies.enabled
description: "Setting that allows you to enable network policies that control access to Longhorn pods.
Warning: If this feature is enabled, the Rancher Proxy will not work unless a custom NetworkPolicy is added."
group: "Other Settings"
label: Network Policies
default: "false"
type: boolean
subquestions:
- variable: networkPolicies.type
label: Network Policies for Ingress
description: "Distribution that determines the policy for allowing access for an ingress. (Options: \"k3s\", \"rke2\", \"rke1\")"
show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
type: enum
default: "rke2"
options:
- "rke1"
- "rke2"
- "k3s"
- variable: defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU for V2 Data Engine
description: 'Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
WARNING:
- Specifying a value of 0 disables CPU requests for instance manager pods; otherwise, specify an integer between 1000 and 8000.
- This is a global setting. Modifying the value triggers an automatic restart of the instance manager pods. Do not modify the value while volumes are still attached.'
group: "Longhorn Default Settings"
type: int
min: 1000
max: 8000
default: 1250
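# Illustrative sketch (not part of the chart): when installing outside the Rancher UI, the
# same keys can be passed to Helm directly. The release name and namespace below are only
# examples.
#
#   helm install longhorn . \
#     --namespace longhorn-system --create-namespace \
#     --set persistence.defaultClassReplicaCount=3 \
#     --set defaultSettings.defaultDataPath=/var/lib/longhorn/ \
#     --set service.ui.type=Rancher-Proxy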

View File

@ -0,0 +1,5 @@
Longhorn is now installed on the cluster!
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
Visit our documentation at https://longhorn.io/docs/

View File

@ -0,0 +1,77 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: longhorn-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- "*"
- apiGroups: [""]
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"]
verbs: ["*"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: ["apps"]
resources: ["daemonsets", "statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets", "podsecuritypolicies"]
verbs: ["*"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
verbs: ["*"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
verbs: ["*"]
- apiGroups: ["longhorn.io"]
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
{{- if .Values.openshift.enabled }}
"engineimages/finalizers", "nodes/finalizers", "instancemanagers/finalizers",
{{- end }}
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
"recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
"supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
"volumeattachments", "volumeattachments/status", "backupbackingimages", "backupbackingimages/status"]
verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["*"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
verbs: ["get", "list", "create", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"]
verbs: ["*"]
{{- if .Values.openshift.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: longhorn-ocp-privileged-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
rules:
- apiGroups: ["security.openshift.io"]
resources: ["securitycontextconstraints"]
resourceNames: ["anyuid", "privileged"]
verbs: ["use"]
{{- end }}

View File

@ -0,0 +1,49 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-bind
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: longhorn-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-support-bundle
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}
{{- if .Values.openshift.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: longhorn-ocp-privileged-bind
labels: {{- include "longhorn.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: longhorn-ocp-privileged-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: longhorn-ui-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: default # supportbundle-agent-support-bundle uses default sa
namespace: {{ include "release_namespace" . }}
{{- end }}
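# Illustrative check (not part of the template): after the bindings above are applied, the
# manager service account's access can be spot-checked with kubectl. The namespace here is
# an example and should match the release namespace.
#
#   kubectl auth can-i create volumes.longhorn.io \
#     --as=system:serviceaccount:longhorn-system:longhorn-service-account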

View File

@ -0,0 +1,167 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-manager
namespace: {{ include "release_namespace" . }}
spec:
selector:
matchLabels:
app: longhorn-manager
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-manager
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
containers:
- name: longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true
command:
- longhorn-manager
- -d
{{- if eq .Values.longhornManager.log.format "json" }}
- -j
{{- end }}
- daemon
- --engine-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
- --instance-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
- --share-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
- --backing-image-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
- --support-bundle-manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}"
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --service-account
- longhorn-service-account
{{- if .Values.preUpgradeChecker.upgradeVersionCheck}}
- --upgrade-version-check
{{- end }}
ports:
- containerPort: 9500
name: manager
- containerPort: 9501
name: conversion-wh
- containerPort: 9502
name: admission-wh
- containerPort: 9503
name: recov-backend
readinessProbe:
httpGet:
path: /v1/healthz
port: 9501
scheme: HTTPS
volumeMounts:
- name: dev
mountPath: /host/dev/
- name: proc
mountPath: /host/proc/
- name: longhorn
mountPath: /var/lib/longhorn/
mountPropagation: Bidirectional
- name: longhorn-grpc-tls
mountPath: /tls-files/
{{- if .Values.enableGoCoverDir }}
- name: go-cover-dir
mountPath: /go-cover-dir/
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.enableGoCoverDir }}
- name: GOCOVERDIR
value: /go-cover-dir/
{{- end }}
volumes:
- name: dev
hostPath:
path: /dev/
- name: proc
hostPath:
path: /proc/
- name: longhorn
hostPath:
path: /var/lib/longhorn/
{{- if .Values.enableGoCoverDir }}
- name: go-cover-dir
hostPath:
path: /go-cover-dir/
type: DirectoryOrCreate
{{- end }}
- name: longhorn-grpc-tls
secret:
secretName: longhorn-grpc-tls
optional: true
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-manager
name: longhorn-backend
namespace: {{ include "release_namespace" . }}
{{- if .Values.longhornManager.serviceAnnotations }}
annotations:
{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.manager.type }}
selector:
app: longhorn-manager
ports:
- name: manager
port: 9500
targetPort: manager
{{- if .Values.service.manager.nodePort }}
nodePort: {{ .Values.service.manager.nodePort }}
{{- end }}
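# Illustrative sketch (not part of the template): the Service above is driven by values such
# as the following; the NodePort number is only an example.
#
#   service:
#     manager:
#       type: NodePort
#       nodePort: 30500
#
# Other components reach the manager through the longhorn-backend Service on port 9500.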

View File

@ -0,0 +1,229 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
default-setting.yaml: |-
{{- if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }}
backup-target: {{ .Values.defaultSettings.backupTarget }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }}
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }}
allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }}
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }}
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }}
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }}
replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }}
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }}
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }}
storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }}
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }}
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}
default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}
failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }}
restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }}
recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringJobMaxRetention) }}
recurring-job-max-retention: {{ .Values.defaultSettings.recurringJobMaxRetention }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }}
recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }}
support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
{{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
{{- end -}}
{{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
{{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}
priority-class: {{ .Values.defaultSettings.priorityClass }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}
auto-salvage: {{ .Values.defaultSettings.autoSalvage }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}
auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }}
disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}
replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaDiskSoftAntiAffinity) }}
replica-disk-soft-anti-affinity: {{ .Values.defaultSettings.replicaDiskSoftAntiAffinity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}
node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }}
node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned) }}
detach-manually-attached-volumes-when-cordoned: {{ .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}
replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }}
concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }}
concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }}
disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }}
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }}
allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }}
auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot) }}
auto-cleanup-recurring-job-backup-snapshot: {{ .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }}
concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }}
backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }}
backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.guaranteedInstanceManagerCPU) }}
guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }}
kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }}
orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }}
storage-network: {{ .Values.defaultSettings.storageNetwork }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }}
deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }}
engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }}
snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }}
snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }}
snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }}
remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }}
fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }}
replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.logLevel) }}
log-level: {{ .Values.defaultSettings.logLevel }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }}
backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }}
backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }}
restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v1DataEngine) }}
v1-data-engine: {{ .Values.defaultSettings.v1DataEngine }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }}
v2-data-engine: {{ .Values.defaultSettings.v2DataEngine }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineHugepageLimit) }}
v2-data-engine-hugepage-limit: {{ .Values.defaultSettings.v2DataEngineHugepageLimit }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.offlineReplicaRebuilding) }}
offline-replica-rebuilding: {{ .Values.defaultSettings.offlineReplicaRebuilding }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyNodeSelectorVolume) }}
allow-empty-node-selector-volume: {{ .Values.defaultSettings.allowEmptyNodeSelectorVolume }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyDiskSelectorVolume) }}
allow-empty-disk-selector-volume: {{ .Values.defaultSettings.allowEmptyDiskSelectorVolume }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.allowCollectingLonghornUsageMetrics) }}
allow-collecting-longhorn-usage-metrics: {{ .Values.defaultSettings.allowCollectingLonghornUsageMetrics }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.disableSnapshotPurge) }}
disable-snapshot-purge: {{ .Values.defaultSettings.disableSnapshotPurge }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU) }}
v2-data-engine-guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU }}
{{- end }}
{{- if not (kindIs "invalid" .Values.defaultSettings.snapshotMaxCount) }}
snapshot-max-count: {{ .Values.defaultSettings.snapshotMaxCount }}
{{- end }}
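{{- /*
Rendering note: each setting above is emitted only when a value is explicitly
set in values.yaml. `kindIs "invalid"` is true for nil (unset) values, so unset
settings are omitted entirely and Longhorn falls back to its built-in defaults
instead of receiving empty strings.
*/ -}}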


@ -0,0 +1,132 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: longhorn-driver-deployer
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
app: longhorn-driver-deployer
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-driver-deployer
spec:
initContainers:
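# Block the deployer rollout until the Longhorn Manager API
# (http://longhorn-backend:9500/v1) responds with HTTP 200.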
- name: wait-longhorn-manager
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-driver-deployer
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- -d
- deploy-driver
- --manager-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
- --manager-url
- http://longhorn-backend:9500/v1
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
{{- if .Values.csi.kubeletRootDir }}
- name: KUBELET_ROOT_DIR
value: {{ .Values.csi.kubeletRootDir }}
{{- end }}
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
- name: CSI_ATTACHER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
{{- end }}
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
- name: CSI_PROVISIONER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
{{- end }}
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
{{- end }}
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
- name: CSI_RESIZER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
{{- end }}
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
- name: CSI_SNAPSHOTTER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
{{- end }}
{{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
- name: CSI_LIVENESS_PROBE_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
{{- end }}
{{- if .Values.csi.attacherReplicaCount }}
- name: CSI_ATTACHER_REPLICA_COUNT
value: {{ .Values.csi.attacherReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.provisionerReplicaCount }}
- name: CSI_PROVISIONER_REPLICA_COUNT
value: {{ .Values.csi.provisionerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.resizerReplicaCount }}
- name: CSI_RESIZER_REPLICA_COUNT
value: {{ .Values.csi.resizerReplicaCount | quote }}
{{- end }}
{{- if .Values.csi.snapshotterReplicaCount }}
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
{{- end }}
{{- if .Values.enableGoCoverDir }}
- name: GOCOVERDIR
value: /go-cover-dir/
volumeMounts:
- name: go-cover-dir
mountPath: /go-cover-dir/
{{- end }}
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
securityContext:
runAsUser: 0
{{- if .Values.enableGoCoverDir }}
volumes:
- name: go-cover-dir
hostPath:
path: /go-cover-dir/
type: DirectoryOrCreate
{{- end }}


@ -0,0 +1,182 @@
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
# https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml
# Create a proxy service account and ensure it will use the route "proxy"
# Create a secure connection to the proxy via a route
apiVersion: route.openshift.io/v1
kind: Route
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: {{ .Values.openshift.ui.route }}
namespace: {{ include "release_namespace" . }}
spec:
to:
kind: Service
name: longhorn-ui
tls:
termination: reencrypt
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
annotations:
service.alpha.openshift.io/serving-cert-secret-name: longhorn-ui-tls
spec:
ports:
- name: longhorn-ui
port: {{ .Values.openshift.ui.port | default 443 }}
targetPort: {{ .Values.openshift.ui.proxy | default 8443 }}
selector:
app: longhorn-ui
---
{{- end }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
spec:
replicas: {{ .Values.longhornUI.replicas }}
selector:
matchLabels:
app: longhorn-ui
template:
metadata:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-ui
spec:
serviceAccountName: longhorn-ui-service-account
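# Soft pod anti-affinity: prefer placing longhorn-ui replicas on different
# nodes (topologyKey kubernetes.io/hostname) so one node failure does not
# remove every UI replica.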
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- longhorn-ui
topologyKey: kubernetes.io/hostname
containers:
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
- name: oauth-proxy
image: {{ template "registry_url" . }}{{ .Values.image.openshift.oauthProxy.repository }}:{{ .Values.image.openshift.oauthProxy.tag }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: {{ .Values.openshift.ui.proxy | default 8443 }}
name: public
args:
- --https-address=:{{ .Values.openshift.ui.proxy | default 8443 }}
- --provider=openshift
- --openshift-service-account=longhorn-ui-service-account
- --upstream=http://localhost:8000
- --tls-cert=/etc/tls/private/tls.crt
- --tls-key=/etc/tls/private/tls.key
- --cookie-secret=SECRET
- --openshift-sar={"namespace":"{{ include "release_namespace" . }}","group":"longhorn.io","resource":"setting","verb":"delete"}
volumeMounts:
- mountPath: /etc/tls/private
name: longhorn-ui-tls
{{- end }}
{{- end }}
- name: longhorn-ui
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: nginx-cache
mountPath: /var/cache/nginx/
- name: nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
ports:
- containerPort: 8000
name: http
env:
- name: LONGHORN_MANAGER_IP
value: "http://longhorn-backend:9500"
- name: LONGHORN_UI_PORT
value: "8000"
volumes:
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
- name: longhorn-ui-tls
secret:
secretName: longhorn-ui-tls
{{- end }}
{{- end }}
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornUI.priorityClass }}
priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
{{- end }}
{{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornUI.tolerations }}
{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornUI.nodeSelector }}
{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
---
kind: Service
apiVersion: v1
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ui
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
kubernetes.io/cluster-service: "true"
{{- end }}
name: longhorn-frontend
namespace: {{ include "release_namespace" . }}
spec:
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
type: ClusterIP
{{- else }}
type: {{ .Values.service.ui.type }}
{{- end }}
{{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
selector:
app: longhorn-ui
ports:
- name: http
port: 80
targetPort: http
{{- if .Values.service.ui.nodePort }}
nodePort: {{ .Values.service.ui.nodePort }}
{{- else }}
nodePort: null
{{- end }}


@ -0,0 +1,37 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-ingress
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ingress
annotations:
{{- if .Values.ingress.secureBackends }}
ingress.kubernetes.io/secure-backends: "true"
{{- end }}
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ default "" .Values.ingress.path }}
pathType: ImplementationSpecific
backend:
service:
name: longhorn-frontend
port:
number: 80
{{- if .Values.ingress.tls }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Values.ingress.tlsSecret }}
{{- end }}
{{- end }}


@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-data-source
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}


@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backing-image-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}


@ -0,0 +1,27 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: instance-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
longhorn.io/component: instance-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
longhorn.io/component: instance-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-manager
- podSelector:
matchLabels:
longhorn.io/component: backing-image-data-source
{{- end }}


@ -0,0 +1,35 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-manager
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: longhorn-manager
- podSelector:
matchLabels:
app: longhorn-ui
- podSelector:
matchLabels:
app: longhorn-csi-plugin
- podSelector:
matchLabels:
longhorn.io/managed-by: longhorn-manager
matchExpressions:
- { key: recurring-job.longhorn.io, operator: Exists }
- podSelector:
matchExpressions:
- { key: longhorn.io/job-task, operator: Exists }
- podSelector:
matchLabels:
app: longhorn-driver-deployer
{{- end }}


@ -0,0 +1,17 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-recovery-backend
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9503
{{- end }}


@ -0,0 +1,46 @@
{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-ui-frontend
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-ui
policyTypes:
- Ingress
ingress:
- from:
{{- if eq .Values.networkPolicies.type "rke1"}}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: ingress-nginx
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
{{- else if eq .Values.networkPolicies.type "rke2" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: rke2-ingress-nginx
app.kubernetes.io/name: rke2-ingress-nginx
{{- else if eq .Values.networkPolicies.type "k3s" }}
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ports:
- port: 8000
protocol: TCP
- port: 80
protocol: TCP
{{- end }}
{{- end }}


@ -0,0 +1,33 @@
{{- if .Values.networkPolicies.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-conversion-webhook
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9501
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: longhorn-admission-webhook
namespace: {{ include "release_namespace" . }}
spec:
podSelector:
matchLabels:
app: longhorn-manager
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9502
{{- end }}


@ -0,0 +1,56 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
name: longhorn-post-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-post-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-post-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- post-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}


@ -0,0 +1,55 @@
{{- if and .Values.preUpgradeChecker.jobEnabled .Values.preUpgradeChecker.upgradeVersionCheck }}
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
name: longhorn-pre-upgrade
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-pre-upgrade
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-pre-upgrade
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- pre-upgrade
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,9 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: "longhorn-critical"
labels: {{- include "longhorn.labels" . | nindent 4 }}
description: "Ensure Longhorn pods have the highest priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure"
globalDefault: false
preemptionPolicy: PreemptLowerPriority
value: 1000000000


@ -0,0 +1,66 @@
{{- if .Values.enablePSP }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: longhorn-psp
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
privileged: true
allowPrivilegeEscalation: true
requiredDropCapabilities:
- NET_RAW
allowedCapabilities:
- SYS_ADMIN
hostNetwork: false
hostIPC: false
hostPID: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
fsGroup:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- secret
- projected
- hostPath
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: longhorn-psp-role
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- longhorn-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: longhorn-psp-binding
labels: {{- include "longhorn.labels" . | nindent 4 }}
namespace: {{ include "release_namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: longhorn-psp-role
subjects:
- kind: ServiceAccount
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
- kind: ServiceAccount
name: default
namespace: {{ include "release_namespace" . }}
{{- end }}


@ -0,0 +1,13 @@
{{- if .Values.privateRegistry.createSecret }}
{{- if .Values.privateRegistry.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.privateRegistry.registrySecret }}
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ template "secret" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,40 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-service-account
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-ui-service-account
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.openshift.enabled }}
{{- if .Values.openshift.ui.route }}
{{- if not .Values.serviceAccount.annotations }}
annotations:
{{- end }}
serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"longhorn-ui"}}'
{{- end }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-support-bundle
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}


@ -0,0 +1,19 @@
{{- if .Values.metrics.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: longhorn-prometheus-servicemonitor
namespace: {{ include "release_namespace" . }}
labels:
{{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-prometheus-servicemonitor
spec:
selector:
matchLabels:
app: longhorn-manager
namespaceSelector:
matchNames:
- {{ include "release_namespace" . }}
endpoints:
- port: manager
{{- end }}


@ -0,0 +1,71 @@
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-conversion-webhook
name: longhorn-conversion-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: conversion-webhook
port: 9501
targetPort: conversion-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-admission-webhook
name: longhorn-admission-webhook
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: admission-webhook
port: 9502
targetPort: admission-wh
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-recovery-backend
name: longhorn-recovery-backend
namespace: {{ include "release_namespace" . }}
spec:
type: ClusterIP
selector:
app: longhorn-manager
ports:
- name: recovery-backend
port: 9503
targetPort: recov-backend
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-engine-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: engine
---
apiVersion: v1
kind: Service
metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
name: longhorn-replica-manager
namespace: {{ include "release_namespace" . }}
spec:
clusterIP: None
selector:
longhorn.io/component: instance-manager
longhorn.io/instance-manager-type: replica


@ -0,0 +1,50 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-storageclass
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
data:
storageclass.yaml: |
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn
annotations:
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
provisioner: driver.longhorn.io
allowVolumeExpansion: true
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
staleReplicaTimeout: "30"
fromBackup: ""
{{- if .Values.persistence.defaultFsType }}
fsType: "{{ .Values.persistence.defaultFsType }}"
{{- end }}
{{- if .Values.persistence.defaultMkfsParams }}
mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}"
{{- end }}
{{- if .Values.persistence.migratable }}
migratable: "{{ .Values.persistence.migratable }}"
{{- end }}
{{- if .Values.persistence.nfsOptions }}
nfsOptions: "{{ .Values.persistence.nfsOptions }}"
{{- end }}
{{- if .Values.persistence.backingImage.enable }}
backingImage: {{ .Values.persistence.backingImage.name }}
backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
{{- end }}
{{- if .Values.persistence.recurringJobSelector.enable }}
recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
{{- end }}
dataLocality: {{ .Values.persistence.defaultDataLocality | quote }}
{{- if .Values.persistence.defaultNodeSelector.enable }}
nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}"
{{- end }}
{{- if .Values.persistence.removeSnapshotsDuringFilesystemTrim }}
unmapMarkSnapChainRemoved: "{{ .Values.persistence.removeSnapshotsDuringFilesystemTrim }}"
{{- end }}


@ -0,0 +1,16 @@
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
namespace: {{ include "release_namespace" $ }}
labels: {{- include "longhorn.labels" $ | nindent 4 }}
app: longhorn
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,57 @@
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
name: longhorn-uninstall
namespace: {{ include "release_namespace" . }}
labels: {{- include "longhorn.labels" . | nindent 4 }}
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
template:
metadata:
name: longhorn-uninstall
labels: {{- include "longhorn.labels" . | nindent 8 }}
spec:
containers:
- name: longhorn-uninstall
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- longhorn-manager
- uninstall
- --force
env:
- name: LONGHORN_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: Never
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}


@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.enablePSP }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}


@ -0,0 +1,484 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
# -- Default system registry.
systemDefaultRegistry: ""
windowsCluster:
# -- Setting that allows Longhorn to run on a Rancher Windows cluster.
enabled: false
# -- Toleration for Linux nodes that can run user-deployed Longhorn components.
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# -- Node selector for Linux nodes that can run user-deployed Longhorn components.
nodeSelector:
kubernetes.io/os: "linux"
defaultSetting:
# -- Toleration for system-managed Longhorn components.
taintToleration: cattle.io/os=linux:NoSchedule
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
networkPolicies:
# -- Setting that allows you to enable network policies that control access to Longhorn pods.
enabled: false
# -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
type: "k3s"
image:
longhorn:
engine:
# -- Repository for the Longhorn Engine image.
repository: longhornio/longhorn-engine
# -- Specify Longhorn engine image tag
tag: v1.6.2
manager:
# -- Repository for the Longhorn Manager image.
repository: longhornio/longhorn-manager
# -- Specify Longhorn manager image tag
tag: v1.6.2
ui:
# -- Repository for the Longhorn UI image.
repository: longhornio/longhorn-ui
# -- Specify Longhorn ui image tag
tag: v1.6.2
instanceManager:
# -- Repository for the Longhorn Instance Manager image.
repository: longhornio/longhorn-instance-manager
# -- Specify Longhorn instance manager image tag
tag: v1.6.2
shareManager:
# -- Repository for the Longhorn Share Manager image.
repository: longhornio/longhorn-share-manager
# -- Specify Longhorn share manager image tag
tag: v1.6.2
backingImageManager:
# -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
repository: longhornio/backing-image-manager
# -- Specify Longhorn backing image manager image tag
tag: v1.6.2
supportBundleKit:
# -- Repository for the Longhorn Support Bundle Manager image.
repository: longhornio/support-bundle-kit
# -- Tag for the Longhorn Support Bundle Manager image.
tag: v0.0.37
csi:
attacher:
# -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-attacher
# -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
tag: v4.5.1
provisioner:
# -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-provisioner
# -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
tag: v3.6.4
nodeDriverRegistrar:
# -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-node-driver-registrar
# -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
tag: v2.9.2
resizer:
# -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-resizer
# -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
tag: v1.10.1
snapshotter:
# -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-snapshotter
# -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
tag: v6.3.4
livenessProbe:
# -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
repository: longhornio/livenessprobe
# -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
tag: v2.12.0
openshift:
oauthProxy:
# -- Repository for the OAuth Proxy image. This setting applies only to OpenShift users.
repository: longhornio/openshift-origin-oauth-proxy
# -- Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14.
tag: 4.14
# -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
pullPolicy: IfNotPresent
service:
ui:
# -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
type: ClusterIP
# -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: null
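## Hypothetical example exposing the UI on a fixed NodePort; 30080 is an
## arbitrary port in the 30000-32767 range mentioned above:
# type: NodePort
# nodePort: 30080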
manager:
# -- Service type for Longhorn Manager.
type: ClusterIP
# -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: ""
persistence:
# -- Setting that allows you to specify the default Longhorn StorageClass.
defaultClass: true
# -- Filesystem type of the default Longhorn StorageClass.
defaultFsType: ext4
# -- mkfs parameters of the default Longhorn StorageClass.
defaultMkfsParams: ""
# -- Replica count of the default Longhorn StorageClass.
defaultClassReplicaCount: 3
# -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
defaultDataLocality: disabled
# -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
reclaimPolicy: Delete
# -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
migratable: false
# -- NFS mount options for RWX volumes in the default Longhorn StorageClass.
nfsOptions: ""
recurringJobSelector:
# -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
enable: false
# -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
jobList: []
backingImage:
# -- Setting that allows you to use a backing image in a Longhorn StorageClass.
enable: false
# -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
name: ~
# -- Data source type of a backing image used in a Longhorn StorageClass.
# If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
# If the backing image does not exist, Longhorn creates one using the specified data source type.
dataSourceType: ~
# -- Data source parameters of a backing image used in a Longhorn StorageClass.
# You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
dataSourceParameters: ~
# -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
expectedChecksum: ~
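## Hypothetical example that lets Longhorn download a backing image on demand.
## The name and URL below are placeholders, not real artifacts:
# enable: true
# name: "example-backing-image"
# dataSourceType: "download"
# dataSourceParameters: '{"url":"https://backing-image-example.s3-region.amazonaws.com/test-backing-image"}'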
defaultNodeSelector:
# -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
enable: false
# -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
selector: ""
# -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
removeSnapshotsDuringFilesystemTrim: ignored
preUpgradeChecker:
# -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
jobEnabled: true
# -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
upgradeVersionCheck: true
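## Hypothetical override for Argo CD or other GitOps installs, where the
## hook-based pre-upgrade Job can block syncs (see the comment above):
# jobEnabled: false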
csi:
# -- kubelet root directory. When unspecified, Longhorn uses the default value.
kubeletRootDir: ~
# -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
attacherReplicaCount: ~
# -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
provisionerReplicaCount: ~
# -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
resizerReplicaCount: ~
# -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
snapshotterReplicaCount: ~
defaultSettings:
# -- Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
backupTarget: ~
# -- Name of the Kubernetes secret associated with the backup target.
backupTargetCredentialSecret: ~
# -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
allowRecurringJobWhileVolumeDetached: ~
# -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
createDefaultDiskLabeledNodes: ~
# -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
defaultDataPath: ~
# -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
defaultDataLocality: ~
# -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
replicaSoftAntiAffinity: ~
# -- Setting that automatically rebalances replicas when an available node is discovered.
replicaAutoBalance: ~
# -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
storageOverProvisioningPercentage: ~
# -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
storageMinimalAvailablePercentage: ~
# -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
storageReservedPercentageForDefaultDisk: ~
# -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.
upgradeChecker: ~
# -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
defaultReplicaCount: ~
# -- Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static".
defaultLonghornStaticStorageClass: ~
# -- Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
backupstorePollInterval: ~
# -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
failedBackupTTL: ~
# -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
restoreVolumeRecurringJobs: ~
# -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
recurringSuccessfulJobsHistoryLimit: ~
# -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
recurringFailedJobsHistoryLimit: ~
# -- Maximum number of snapshots or backups to be retained.
recurringJobMaxRetention: ~
# -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
supportBundleFailedHistoryLimit: ~
# -- Taint or toleration for system-managed Longhorn components.
# Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
taintToleration: ~
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: ~
# -- PriorityClass for system-managed Longhorn components.
# This setting can help prevent Longhorn components from being evicted under Node Pressure.
# Note that this value is also applied to user-deployed Longhorn components (such as `longhornManager.priorityClass`) when no priority class is explicitly set for them.
priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
# -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
autoSalvage: ~
# -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
# -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
disableSchedulingOnCordonedNode: ~
# -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
replicaZoneSoftAntiAffinity: ~
# -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
replicaDiskSoftAntiAffinity: ~
# -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
nodeDownPodDeletionPolicy: ~
# -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
nodeDrainPolicy: ~
# -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
detachManuallyAttachedVolumesWhenCordoned: ~
# -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
replicaReplenishmentWaitInterval: ~
# -- Maximum number of replicas that can be concurrently rebuilt on each node.
concurrentReplicaRebuildPerNodeLimit: ~
# -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
concurrentVolumeBackupRestorePerNodeLimit: ~
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
disableRevisionCounter: ~
# -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
systemManagedPodsImagePullPolicy: ~
# -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
allowVolumeCreationWithDegradedAvailability: ~
# -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
autoCleanupSystemGeneratedSnapshot: ~
# -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
autoCleanupRecurringJobBackupSnapshot: ~
# -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
concurrentAutomaticEngineUpgradePerNodeLimit: ~
# -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
backingImageCleanupWaitInterval: ~
# -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
backingImageRecoveryWaitInterval: ~
# -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
guaranteedInstanceManagerCPU: ~
# -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
kubernetesClusterAutoscalerEnabled: ~
# -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
orphanAutoDeletion: ~
# -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
storageNetwork: ~
# -- Flag that prevents accidental uninstallation of Longhorn.
deletingConfirmationFlag: ~
# -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
engineReplicaTimeout: ~
# -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
snapshotDataIntegrity: ~
# -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
# -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
snapshotDataIntegrityCronjob: ~
# -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
removeSnapshotsDuringFilesystemTrim: ~
# -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
fastReplicaRebuildEnabled: ~
# -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
replicaFileSyncHttpClientTimeout: ~
# -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
logLevel: ~
# -- Setting that allows you to specify a backup compression method.
backupCompressionMethod: ~
# -- Maximum number of worker threads that can concurrently run for each backup.
backupConcurrentLimit: ~
# -- Maximum number of worker threads that can concurrently run for each restore operation.
restoreConcurrentLimit: ~
# -- Setting that allows you to enable the V1 Data Engine.
v1DataEngine: ~
# -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments.
v2DataEngine: ~
# -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
v2DataEngineHugepageLimit: ~
# -- Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine.
offlineReplicaRebuilding: ~
# -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
v2DataEngineGuaranteedInstanceManagerCPU: ~
# -- Setting that allows scheduling of empty node selector volumes to any node.
allowEmptyNodeSelectorVolume: ~
# -- Setting that allows scheduling of empty disk selector volumes to any disk.
allowEmptyDiskSelectorVolume: ~
# -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
allowCollectingLonghornUsageMetrics: ~
# -- Setting that temporarily prevents all attempts to purge volume snapshots.
disableSnapshotPurge: ~
# -- Maximum snapshot count for a volume. The value should be between 2 and 250.
snapshotMaxCount: ~
privateRegistry:
# -- Setting that allows you to create a private registry secret.
createSecret: ~
# -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
registryUrl: ~
# -- User account used for authenticating with a private registry.
registryUser: ~
# -- Password for authenticating with a private registry.
registryPasswd: ~
# -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
registrySecret: ~
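## Hypothetical example for pulling Longhorn images from a private registry;
## every value below is a placeholder:
# createSecret: true
# registryUrl: "registry.example.com"
# registryUser: "example-user"
# registryPasswd: "example-password"
# registrySecret: "example-registry-secret"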
longhornManager:
log:
# -- Format of Longhorn Manager logs. (Options: "plain", "json")
format: plain
# -- PriorityClass for Longhorn Manager.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager.
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
# -- Annotation for the Longhorn Manager service.
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
# annotation-key1: "annotation-value1"
# annotation-key2: "annotation-value2"
longhornDriver:
# -- PriorityClass for Longhorn Driver.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
# -- Replica count for Longhorn UI.
replicas: 2
# -- PriorityClass for Longhorn UI.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
ingress:
# -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
enabled: false
# -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
# ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
ingressClassName: ~
# -- Hostname of the Layer 7 load balancer.
host: sslip.io
# -- Setting that allows you to enable TLS on ingress records.
tls: false
# -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
secureBackends: false
# -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
tlsSecret: longhorn.local-tls
# -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}.
path: /
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
  ## If tls is set to true, the annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
# -- Ingress annotations in the form of key-value pairs.
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true
# -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: longhorn.local-tls
# key:
# certificate:
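  ## Illustrative sketch only: the host, ingress class, and annotation below are hypothetical.
  ## A typical NGINX ingress with TLS might combine the settings above as follows.
  # enabled: true
  # ingressClassName: nginx
  # host: longhorn.example.com
  # tls: true
  # tlsSecret: longhorn.local-tls
  # annotations:
  #   nginx.ingress.kubernetes.io/ssl-redirect: "true"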
# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
enablePSP: false
# -- Namespace override. This setting is useful when Longhorn is installed as a sub-chart and its release namespace is not `longhorn-system`.
namespaceOverride: ""
# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
annotations: {}
serviceAccount:
# -- Annotations to add to the service account
annotations: {}
metrics:
serviceMonitor:
# -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
enabled: false
## openshift settings
openshift:
# -- Setting that allows Longhorn to integrate with OpenShift.
enabled: false
ui:
# -- Route for connections between Longhorn and the OpenShift web console.
route: "longhorn-ui"
# -- Port for accessing the OpenShift web console.
port: 443
# -- Port for proxy that provides access to the OpenShift web console.
proxy: 8443
# -- Setting that allows Longhorn to generate code coverage profiles.
enableGoCoverDir: false

View File

@ -0,0 +1,39 @@
--- charts-original/Chart.yaml
+++ charts/Chart.yaml
@@ -1,28 +1,11 @@
+annotations:
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/hidden: "true"
+ catalog.cattle.io/namespace: longhorn-system
+ catalog.cattle.io/release-name: longhorn-crd
apiVersion: v1
appVersion: v1.6.2
-description: Longhorn is a distributed block storage system for Kubernetes.
-home: https://github.com/longhorn/longhorn
-icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
-keywords:
-- longhorn
-- storage
-- distributed
-- block
-- device
-- iscsi
-- nfs
-kubeVersion: '>=1.21.0-0'
-maintainers:
-- email: maintainers@longhorn.io
- name: Longhorn maintainers
-name: longhorn
-sources:
-- https://github.com/longhorn/longhorn
-- https://github.com/longhorn/longhorn-engine
-- https://github.com/longhorn/longhorn-instance-manager
-- https://github.com/longhorn/longhorn-share-manager
-- https://github.com/longhorn/longhorn-manager
-- https://github.com/longhorn/longhorn-ui
-- https://github.com/longhorn/longhorn-tests
-- https://github.com/longhorn/backing-image-manager
+description: Installs the CRDs for longhorn.
+name: longhorn-crd
+type: application
version: 1.6.2

View File

@ -0,0 +1,332 @@
--- charts-original/README.md
+++ charts/README.md
@@ -1,327 +1,2 @@
-# Longhorn Chart
-
-> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
-
-> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
-
-> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/).
-
-## Source Code
-
-Longhorn is 100% open source software. Project source code is spread across a number of repos:
-
-1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
-2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
-3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
-4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
-5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
-6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
-
-## Prerequisites
-
-1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
-2. Kubernetes >= v1.21
-3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster.
-4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already.
-
-## Upgrading to Kubernetes v1.25+
-
-Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
-
-As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
-
-> **Note:**
-> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
->
-> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
-Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
-
-As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
-
-## Installation
-
-1. Add Longhorn chart repository.
-```
-helm repo add longhorn https://charts.longhorn.io
-```
-
-2. Update local Longhorn chart information from chart repository.
-```
-helm repo update
-```
-
-3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart.
-
-```
-kubectl create namespace longhorn-system
-helm install longhorn longhorn/longhorn --namespace longhorn-system
-```
-
-## Uninstallation
-
-```
-kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
-helm uninstall longhorn -n longhorn-system
-kubectl delete namespace longhorn-system
-```
-
-## Values
-
-The `values.yaml` contains items used to tweak a deployment of this chart.
-
-### Cattle Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| global.cattle.systemDefaultRegistry | string | `""` | Default system registry. |
-| global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector | string | `"kubernetes.io/os:linux"` | Node selector for system-managed Longhorn components. |
-| global.cattle.windowsCluster.defaultSetting.taintToleration | string | `"cattle.io/os=linux:NoSchedule"` | Toleration for system-managed Longhorn components. |
-| global.cattle.windowsCluster.enabled | bool | `false` | Setting that allows Longhorn to run on a Rancher Windows cluster. |
-| global.cattle.windowsCluster.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for Linux nodes that can run user-deployed Longhorn components. |
-| global.cattle.windowsCluster.tolerations | list | `[{"effect":"NoSchedule","key":"cattle.io/os","operator":"Equal","value":"linux"}]` | Toleration for Linux nodes that can run user-deployed Longhorn components. |
-
-### Network Policies
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| networkPolicies.enabled | bool | `false` | Setting that allows you to enable network policies that control access to Longhorn pods. |
-| networkPolicies.type | string | `"k3s"` | Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1") |
-
-### Image Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| image.csi.attacher.repository | string | `"longhornio/csi-attacher"` | Repository for the CSI attacher image. When unspecified, Longhorn uses the default value. |
-| image.csi.attacher.tag | string | `"v4.5.1"` | Tag for the CSI attacher image. When unspecified, Longhorn uses the default value. |
-| image.csi.livenessProbe.repository | string | `"longhornio/livenessprobe"` | Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value. |
-| image.csi.livenessProbe.tag | string | `"v2.12.0"` | Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value. |
-| image.csi.nodeDriverRegistrar.repository | string | `"longhornio/csi-node-driver-registrar"` | Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. |
-| image.csi.nodeDriverRegistrar.tag | string | `"v2.9.2"` | Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. |
-| image.csi.provisioner.repository | string | `"longhornio/csi-provisioner"` | Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value. |
-| image.csi.provisioner.tag | string | `"v3.6.4"` | Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value. |
-| image.csi.resizer.repository | string | `"longhornio/csi-resizer"` | Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value. |
-| image.csi.resizer.tag | string | `"v1.10.1"` | Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value. |
-| image.csi.snapshotter.repository | string | `"longhornio/csi-snapshotter"` | Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. |
-| image.csi.snapshotter.tag | string | `"v6.3.4"` | Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. |
-| image.longhorn.backingImageManager.repository | string | `"longhornio/backing-image-manager"` | Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value. |
-| image.longhorn.backingImageManager.tag | string | `"v1.6.1"` | Specify Longhorn backing image manager image tag |
-| image.longhorn.engine.repository | string | `"longhornio/longhorn-engine"` | Repository for the Longhorn Engine image. |
-| image.longhorn.engine.tag | string | `"v1.6.1"` | Specify Longhorn engine image tag |
-| image.longhorn.instanceManager.repository | string | `"longhornio/longhorn-instance-manager"` | Repository for the Longhorn Instance Manager image. |
-| image.longhorn.instanceManager.tag | string | `"v1.6.1"` | Specify Longhorn instance manager image tag |
-| image.longhorn.manager.repository | string | `"longhornio/longhorn-manager"` | Repository for the Longhorn Manager image. |
-| image.longhorn.manager.tag | string | `"v1.6.1"` | Specify Longhorn manager image tag |
-| image.longhorn.shareManager.repository | string | `"longhornio/longhorn-share-manager"` | Repository for the Longhorn Share Manager image. |
-| image.longhorn.shareManager.tag | string | `"v1.6.1"` | Specify Longhorn share manager image tag |
-| image.longhorn.supportBundleKit.repository | string | `"longhornio/support-bundle-kit"` | Repository for the Longhorn Support Bundle Manager image. |
-| image.longhorn.supportBundleKit.tag | string | `"v0.0.37"` | Tag for the Longhorn Support Bundle Manager image. |
-| image.longhorn.ui.repository | string | `"longhornio/longhorn-ui"` | Repository for the Longhorn UI image. |
-| image.longhorn.ui.tag | string | `"v1.6.1"` | Specify Longhorn ui image tag |
-| image.openshift.oauthProxy.repository | string | `"longhornio/openshift-origin-oauth-proxy"` | Repository for the OAuth Proxy image. This setting applies only to OpenShift users. |
-| image.openshift.oauthProxy.tag | float | `4.14` | Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14. |
-| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI. |
-
-### Service Settings
-
-| Key | Description |
-|-----|-------------|
-| service.manager.nodePort | NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767. |
-| service.manager.type | Service type for Longhorn Manager. |
-| service.ui.nodePort | NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767. |
-| service.ui.type | Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy") |
-
-### StorageClass Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| persistence.backingImage.dataSourceParameters | string | `nil` | Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) |
-| persistence.backingImage.dataSourceType | string | `nil` | Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type. |
-| persistence.backingImage.enable | bool | `false` | Setting that allows you to use a backing image in a Longhorn StorageClass. |
-| persistence.backingImage.expectedChecksum | string | `nil` | Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. |
-| persistence.backingImage.name | string | `nil` | Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image. |
-| persistence.defaultClass | bool | `true` | Setting that allows you to specify the default Longhorn StorageClass. |
-| persistence.defaultClassReplicaCount | int | `3` | Replica count of the default Longhorn StorageClass. |
-| persistence.defaultDataLocality | string | `"disabled"` | Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort") |
-| persistence.defaultFsType | string | `"ext4"` | Filesystem type of the default Longhorn StorageClass. |
-| persistence.defaultMkfsParams | string | `""` | mkfs parameters of the default Longhorn StorageClass. |
-| persistence.defaultNodeSelector.enable | bool | `false` | Setting that allows you to enable the node selector for the default Longhorn StorageClass. |
-| persistence.defaultNodeSelector.selector | string | `""` | Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast") |
-| persistence.migratable | bool | `false` | Setting that allows you to enable live migration of a Longhorn volume from one node to another. |
-| persistence.nfsOptions | string | `""` | Set NFS mount options for Longhorn StorageClass for RWX volumes |
-| persistence.reclaimPolicy | string | `"Delete"` | Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete") |
-| persistence.recurringJobSelector.enable | bool | `false` | Setting that allows you to enable the recurring job selector for a Longhorn StorageClass. |
-| persistence.recurringJobSelector.jobList | list | `[]` | Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`) |
-| persistence.removeSnapshotsDuringFilesystemTrim | string | `"ignored"` | Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled") |
-
-### CSI Settings
-
-| Key | Description |
-|-----|-------------|
-| csi.attacherReplicaCount | Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3"). |
-| csi.kubeletRootDir | kubelet root directory. When unspecified, Longhorn uses the default value. |
-| csi.provisionerReplicaCount | Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3"). |
-| csi.resizerReplicaCount | Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3"). |
-| csi.snapshotterReplicaCount | Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3"). |
-
-### Longhorn Manager Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornManager.log.format | string | `"plain"` | Format of Longhorn Manager logs. (Options: "plain", "json") |
-| longhornManager.nodeSelector | object | `{}` | Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager. |
-| longhornManager.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Manager. |
-| longhornManager.serviceAnnotations | object | `{}` | Annotation for the Longhorn Manager service. |
-| longhornManager.tolerations | list | `[]` | Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager. |
-
-### Longhorn Driver Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornDriver.nodeSelector | object | `{}` | Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver. |
-| longhornDriver.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Driver. |
-| longhornDriver.tolerations | list | `[]` | Toleration for Longhorn Driver on nodes allowed to run Longhorn components. |
-
-### Longhorn UI Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornUI.nodeSelector | object | `{}` | Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI. |
-| longhornUI.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn UI. |
-| longhornUI.replicas | int | `2` | Replica count for Longhorn UI. |
-| longhornUI.tolerations | list | `[]` | Toleration for Longhorn UI on nodes allowed to run Longhorn components. |
-
-### Ingress Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| ingress.annotations | string | `nil` | Ingress annotations in the form of key-value pairs. |
-| ingress.enabled | bool | `false` | Setting that allows Longhorn to generate ingress records for the Longhorn UI service. |
-| ingress.host | string | `"sslip.io"` | Hostname of the Layer 7 load balancer. |
-| ingress.ingressClassName | string | `nil` | IngressClass resource that contains ingress configuration, including the name of the Ingress controller. ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases. |
-| ingress.path | string | `"/"` | Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}. |
-| ingress.secrets | string | `nil` | Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses. |
-| ingress.secureBackends | bool | `false` | Setting that allows you to enable secure connections to the Longhorn UI service via port 443. |
-| ingress.tls | bool | `false` | Setting that allows you to enable TLS on ingress records. |
-| ingress.tlsSecret | string | `"longhorn.local-tls"` | TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records. |
-
-### Private Registry Settings
-
-You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs).
-
-| Key | Description |
-|-----|-------------|
-| privateRegistry.createSecret | Setting that allows you to create a private registry secret. |
-| privateRegistry.registryPasswd | Password for authenticating with a private registry. |
-| privateRegistry.registrySecret | Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name. |
-| privateRegistry.registryUrl | URL of a private registry. When unspecified, Longhorn uses the default system registry. |
-| privateRegistry.registryUser | User account used for authenticating with a private registry. |
-
-### OS/Kubernetes Distro Settings
-
-#### OpenShift Settings
-
-For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md).
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| openshift.enabled | bool | `false` | Setting that allows Longhorn to integrate with OpenShift. |
-| openshift.ui.port | int | `443` | Port for accessing the OpenShift web console. |
-| openshift.ui.proxy | int | `8443` | Port for proxy that provides access to the OpenShift web console. |
-| openshift.ui.route | string | `"longhorn-ui"` | Route for connections between Longhorn and the OpenShift web console. |
-
-### Other Settings
-
-| Key | Default | Description |
-|-----|---------|-------------|
-| annotations | `{}` | Annotation for the Longhorn Manager DaemonSet pods. This setting is optional. |
-| enableGoCoverDir | `false` | Setting that allows Longhorn to generate code coverage profiles. |
-| enablePSP | `false` | Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled. |
-| metrics.serviceMonitor.enabled | `false` | Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components. |
-| namespaceOverride | `""` | Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`. |
-| preUpgradeChecker.jobEnabled | `true` | Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions. |
-| preUpgradeChecker.upgradeVersionCheck | `true` | Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled. |
-
-### System Default Settings
-
-During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs).
-
-| Key | Description |
-|-----|-------------|
-| defaultSettings.allowCollectingLonghornUsageMetrics | Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses. |
-| defaultSettings.allowEmptyDiskSelectorVolume | Setting that allows scheduling of empty disk selector volumes to any disk. |
-| defaultSettings.allowEmptyNodeSelectorVolume | Setting that allows scheduling of empty node selector volumes to any node. |
-| defaultSettings.allowRecurringJobWhileVolumeDetached | Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run. |
-| defaultSettings.allowVolumeCreationWithDegradedAvailability | Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation. |
-| defaultSettings.autoCleanupRecurringJobBackupSnapshot | Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job. |
-| defaultSettings.autoCleanupSystemGeneratedSnapshot | Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed. |
-| defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly | Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting. |
-| defaultSettings.autoSalvage | Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default. |
-| defaultSettings.backingImageCleanupWaitInterval | Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it. |
-| defaultSettings.backingImageRecoveryWaitInterval | Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown". |
-| defaultSettings.backupCompressionMethod | Setting that allows you to specify a backup compression method. |
-| defaultSettings.backupConcurrentLimit | Maximum number of worker threads that can concurrently run for each backup. |
-| defaultSettings.backupTarget | Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE") |
-| defaultSettings.backupTargetCredentialSecret | Name of the Kubernetes secret associated with the backup target. |
-| defaultSettings.backupstorePollInterval | Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled. |
-| defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit | Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version. |
-| defaultSettings.concurrentReplicaRebuildPerNodeLimit | Maximum number of replicas that can be concurrently rebuilt on each node. |
-| defaultSettings.concurrentVolumeBackupRestorePerNodeLimit | Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled. |
-| defaultSettings.createDefaultDiskLabeledNodes | Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster. |
-| defaultSettings.defaultDataLocality | Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume. |
-| defaultSettings.defaultDataPath | Default path for storing data on a host. The default value is "/var/lib/longhorn/". |
-| defaultSettings.defaultLonghornStaticStorageClass | Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static". |
-| defaultSettings.defaultReplicaCount | Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3". |
-| defaultSettings.deletingConfirmationFlag | Flag that prevents accidental uninstallation of Longhorn. |
-| defaultSettings.detachManuallyAttachedVolumesWhenCordoned | Setting that allows automatic detaching of manually-attached volumes when a node is cordoned. |
-| defaultSettings.disableRevisionCounter | Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI. |
-| defaultSettings.disableSchedulingOnCordonedNode | Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default. |
-| defaultSettings.disableSnapshotPurge | Setting that temporarily prevents all attempts to purge volume snapshots. |
-| defaultSettings.engineReplicaTimeout | Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8". |
-| defaultSettings.failedBackupTTL | Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled. |
-| defaultSettings.fastReplicaRebuildEnabled | Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check". |
-| defaultSettings.guaranteedInstanceManagerCPU | Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12". |
-| defaultSettings.kubernetesClusterAutoscalerEnabled | Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. |
-| defaultSettings.logLevel | Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace") |
-| defaultSettings.nodeDownPodDeletionPolicy | Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed. |
-| defaultSettings.nodeDrainPolicy | Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained. |
-| defaultSettings.offlineReplicaRebuilding | Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine. |
-| defaultSettings.orphanAutoDeletion | Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up. |
-| defaultSettings.priorityClass | PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. |
-| defaultSettings.recurringFailedJobsHistoryLimit | Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained. |
-| defaultSettings.recurringJobMaxRetention | Maximum number of snapshots or backups to be retained. |
-| defaultSettings.recurringSuccessfulJobsHistoryLimit | Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained. |
-| defaultSettings.removeSnapshotsDuringFilesystemTrim | Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files. |
-| defaultSettings.replicaAutoBalance | Setting that automatically rebalances replicas when an available node is discovered. |
-| defaultSettings.replicaDiskSoftAntiAffinity | Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default. |
-| defaultSettings.replicaFileSyncHttpClientTimeout | Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed. |
-| defaultSettings.replicaReplenishmentWaitInterval | Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume. |
-| defaultSettings.replicaSoftAntiAffinity | Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default. |
-| defaultSettings.replicaZoneSoftAntiAffinity | Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object. |
-| defaultSettings.restoreConcurrentLimit | Maximum number of worker threads that can concurrently run for each restore operation. |
-| defaultSettings.restoreVolumeRecurringJobs | Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration. |
-| defaultSettings.snapshotDataIntegrity | Setting that allows you to enable and disable snapshot hashing and data integrity checks. |
-| defaultSettings.snapshotDataIntegrityCronjob | Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format. |
-| defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation | Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance. |
-| defaultSettings.snapshotMaxCount | Maximum snapshot count for a volume. The value should be between 2 to 250 |
-| defaultSettings.storageMinimalAvailablePercentage | Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25". |
-| defaultSettings.storageNetwork | Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network. |
-| defaultSettings.storageOverProvisioningPercentage | Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100". |
-| defaultSettings.storageReservedPercentageForDefaultDisk | Percentage of disk space that is not allocated to the default disk on each new Longhorn node. |
-| defaultSettings.supportBundleFailedHistoryLimit | Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles. |
-| defaultSettings.systemManagedComponentsNodeSelector | Node selector for system-managed Longhorn components. |
-| defaultSettings.systemManagedPodsImagePullPolicy | Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart. |
-| defaultSettings.taintToleration | Taint or toleration for system-managed Longhorn components. Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect). |
-| defaultSettings.upgradeChecker | Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default |
-| defaultSettings.v1DataEngine | Setting that allows you to enable the V1 Data Engine. |
-| defaultSettings.v2DataEngine | Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. |
-| defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU | Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". |
-| defaultSettings.v2DataEngineHugepageLimit | Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine. |
-
----
-Please see [link](https://github.com/longhorn/longhorn) for more information.
+# longhorn-crd
+A Rancher chart that installs the CRDs used by longhorn.

View File

@ -0,0 +1,5 @@
url: https://github.com/longhorn/charts.git
subdirectory: charts/longhorn
commit: ad73dc01239b7eeb25ff510ce8358578433d85a5
version: 104.1.0
doNotRelease: false

View File

@ -0,0 +1,239 @@
# Longhorn Chart
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/).
## Source Code
Longhorn is 100% open source software. Project source code is spread across a number of repos:
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
## Prerequisites
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes >= v1.21
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
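For example (a hedged sketch only: the `privileged` enforce level is an assumption based on Longhorn's need to run privileged containers, so follow the Longhorn docs for the levels appropriate to your cluster), the release namespace can be labeled before installing the chart:
```
apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
```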
## Installation
1. Add Longhorn chart repository.
```
helm repo add longhorn https://charts.longhorn.io
```
2. Update local Longhorn chart information from chart repository.
```
helm repo update
```
3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart.
```
kubectl create namespace longhorn-system
helm install longhorn longhorn/longhorn --namespace longhorn-system
```
## Uninstallation
```
kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
helm uninstall longhorn -n longhorn-system
kubectl delete namespace longhorn-system
```
## Values
The `values.yaml` contains items used to tweak a deployment of this chart.
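For example, a minimal override file passed to `helm install -f` might look like the following (a sketch only: the keys exist in this chart's `values.yaml`, but the values shown are purely illustrative):
```
longhornUI:
  replicas: 1
persistence:
  defaultClassReplicaCount: 2
ingress:
  enabled: false
```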
### Cattle Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "global" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Network Policies
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "networkPolicies" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Image Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "image" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Service Settings
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if (and (hasPrefix "service" .Key) (not (contains "Account" .Key))) }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### StorageClass Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "persistence" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### CSI Settings
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "csi" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn Manager Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornManager" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn Driver Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornDriver" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Longhorn UI Settings
Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornUI" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Ingress Settings
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "ingress" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Private Registry Settings
You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs).
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "privateRegistry" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### OS/Kubernetes Distro Settings
#### OpenShift Settings
For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md).
| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "openshift" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### Other Settings
| Key | Default | Description |
|-----|---------|-------------|
{{- range .Values }}
{{- if not (or (hasPrefix "defaultSettings" .Key)
(hasPrefix "networkPolicies" .Key)
(hasPrefix "image" .Key)
(hasPrefix "service" .Key)
(hasPrefix "persistence" .Key)
(hasPrefix "csi" .Key)
(hasPrefix "longhornManager" .Key)
(hasPrefix "longhornDriver" .Key)
(hasPrefix "longhornUI" .Key)
(hasPrefix "privateRegistry" .Key)
(hasPrefix "ingress" .Key)
(hasPrefix "openshift" .Key)
(hasPrefix "global" .Key)) }}
| {{ .Key }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
### System Default Settings
During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs).
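For example (a sketch only: the keys are documented in the table below and the values shown are illustrative), system defaults can be overridden at install time through the `defaultSettings` map in your values file:
```
defaultSettings:
  logLevel: "Info"
  backupstorePollInterval: "300"
  snapshotMaxCount: 250
```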
| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "defaultSettings" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

View File

@ -0,0 +1,177 @@
# OpenShift / OKD Extra Configuration Steps
- [OpenShift / OKD Extra Configuration Steps](#openshift--okd-extra-configuration-steps)
- [Notes](#notes)
- [Known Issues](#known-issues)
- [Preparing Nodes (Optional)](#preparing-nodes-optional)
- [Default /var/lib/longhorn setup](#default-varliblonghorn-setup)
- [Separate /var/mnt/longhorn setup](#separate-varmntlonghorn-setup)
- [Create Filesystem](#create-filesystem)
- [Mounting Disk On Boot](#mounting-disk-on-boot)
- [Label and Annotate Nodes](#label-and-annotate-nodes)
- [Example values.yaml](#example-valuesyaml)
- [Installation](#installation)
- [Refs](#refs)
## Notes
Main changes and tasks for OCP are:
- On OCP / OKD, the Operating System is Managed by the Cluster
- OCP Imposes [Security Context Constraints](https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html)
  - This requires everything to run with the least privilege possible. For the moment, every component has been given access to run with higher privileges.
  - Something to circle back on is network policies and which components can have their privileges reduced without impacting functionality.
    - The UI, for example, probably can be.
- openshift/oauth-proxy for authentication to the Longhorn UI
- **⚠️** Currently Scoped to Authenticated Users that can delete a longhorn settings object.
    - **⚠️** Since the UI itself is not protected, network policies will need to be created to prevent namespace <--> namespace communication against the pod or service object directly (a hedged example policy is sketched after this list).
- Anyone with access to the UI Deployment can remove the route restriction. (Namespace Scoped Admin)
- Option to use separate disk in /var/mnt/longhorn & MachineConfig file to mount /var/mnt/longhorn
- Adding finalizers for mount propagation
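The NetworkPolicy below is only a hedged sketch of the note above about protecting the UI: the namespace, the `app: longhorn-ui` pod label, and port 8000 are assumptions and must be checked against your deployment. It restricts direct ingress to the Longhorn UI pods to traffic from pods in the same namespace.
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: longhorn-ui-same-namespace-only
  namespace: longhorn-system
spec:
  # Assumed label; verify the labels on your longhorn-ui pods.
  podSelector:
    matchLabels:
      app: longhorn-ui
  policyTypes:
    - Ingress
  ingress:
    - from:
        # Only allow traffic originating from pods in the same namespace.
        - podSelector: {}
      ports:
        - protocol: TCP
          # Assumed Longhorn UI container port.
          port: 8000
```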
## Known Issues
- General Feature/Issue Thread
- [[FEATURE] Deploying Longhorn on OKD/Openshift](https://github.com/longhorn/longhorn/issues/1831)
- 4.10 / 1.23:
- 4.10.0-0.okd-2022-03-07-131213 to 4.10.0-0.okd-2022-07-09-073606
- Tested, No Known Issues
- 4.11 / 1.24:
- 4.11.0-0.okd-2022-07-27-052000 to 4.11.0-0.okd-2022-11-19-050030
- Tested, No Known Issues
- 4.11.0-0.okd-2022-12-02-145640, 4.11.0-0.okd-2023-01-14-152430:
- Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988)
- [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772)
- 4.12 / 1.25:
- 4.12.0-0.okd-2022-12-05-210624 to 4.12.0-0.okd-2023-01-20-101927
- Tested, No Known Issues
- 4.12.0-0.okd-2023-01-21-055900 to 4.12.0-0.okd-2023-02-18-033438:
- Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988)
- [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772)
- 4.12.0-0.okd-2023-03-05-022504 - 4.12.0-0.okd-2023-04-16-041331:
- Tested, No Known Issues
- 4.13 / 1.26:
- 4.13.0-0.okd-2023-05-03-001308 - 4.13.0-0.okd-2023-08-18-135805:
- Tested, No Known Issues
- 4.14 / 1.27:
- 4.14.0-0.okd-2023-08-12-022330 - 4.14.0-0.okd-2023-10-28-073550:
- Tested, No Known Issues
## Preparing Nodes (Optional)
Only required if you need additional customizations, such as storage-less nodes or secondary disks.
### Default /var/lib/longhorn setup
Label each node for storage with:
```bash
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
oc label node "${NODE}" node.longhorn.io/create-default-disk=true
```
### Separate /var/mnt/longhorn setup
#### Create Filesystem
On the storage nodes create a filesystem with the label longhorn:
```bash
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
oc debug node/${NODE} -t -- chroot /host bash
# Validate Target Drive is Present
lsblk
export DRIVE="sdb" #vdb
sudo mkfs.ext4 -L longhorn /dev/${DRIVE}
```
> ⚠️ Note: If you add new nodes after the MachineConfig below is applied, you will also need to reboot those nodes.
#### Mounting Disk On Boot
The secondary drive needs to be mounted on every boot. Save the contents below and apply the MachineConfig with `oc apply -f`:
> ⚠️ This will trigger a machine config profile update and reboot all worker nodes in the cluster.
```yaml
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 71-mount-storage-worker
spec:
  config:
    ignition:
      version: 3.2.0
    systemd:
      units:
        - name: var-mnt-longhorn.mount
          enabled: true
          contents: |
            [Unit]
            Before=local-fs.target
            [Mount]
            Where=/var/mnt/longhorn
            What=/dev/disk/by-label/longhorn
            Options=rw,relatime,discard
            [Install]
            WantedBy=local-fs.target
```
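A minimal sketch of applying the MachineConfig and watching the rollout; the filename is illustrative:
```bash
# Apply the MachineConfig saved above (filename is illustrative)
oc apply -f 71-mount-storage-worker.yaml
# Workers reboot one at a time; wait until the pool reports UPDATED=True
oc get machineconfigpool worker -w
```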
#### Label and Annotate Nodes
Label and annotate storage nodes like this:
```bash
# List the node names, then pick the storage node to configure
oc get nodes --no-headers | awk '{print $1}'
export NODE="worker-0"
# Point the default disk at the dedicated mount and allow scheduling on it
oc annotate node ${NODE} --overwrite node.longhorn.io/default-disks-config='[{"path":"/var/mnt/longhorn","allowScheduling":true}]'
oc label node ${NODE} node.longhorn.io/create-default-disk=config
```
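To double-check that the annotation and label landed on the node, for example:
```bash
# Confirm the disk-config annotation and the create-default-disk label
oc get node ${NODE} -o yaml | grep -E 'default-disks-config|create-default-disk'
```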
## Example values.yaml
Minimum adjustments required:
```yaml
image:
  openshift:
    oauthProxy:
      repository: quay.io/openshift/origin-oauth-proxy
      tag: 4.14 # Use your OCP/OKD 4.x version; the current stable is 4.14

# defaultSettings: # Preparing nodes (Optional)
#   createDefaultDiskLabeledNodes: true

openshift:
  enabled: true
  ui:
    route: "longhorn-ui"
    port: 443
    proxy: 8443
```
## Installation
```bash
# Local testing from a chart checkout:
# helm template ./chart/ --namespace longhorn-system --values ./chart/values.yaml --no-hooks > longhorn.yaml
# Render the manifests from the packaged chart
helm template longhorn --namespace longhorn-system --values values.yaml --no-hooks > longhorn.yaml
# Create the namespace (idempotent) and apply the rendered manifests
oc create namespace longhorn-system -o yaml --dry-run=client | oc apply -f -
oc apply -f longhorn.yaml -n longhorn-system
```
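A short, illustrative check that the deployment came up and that the UI Route exists (the route name is assumed from the example values above):
```bash
# Wait for the Longhorn pods to become Ready, then look up the UI route host
oc -n longhorn-system get pods
oc -n longhorn-system get route longhorn-ui -o jsonpath='{.spec.host}{"\n"}'
```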
## Refs
- <https://docs.openshift.com/container-platform/4.11/storage/persistent_storage/persistent-storage-iscsi.html>
- <https://docs.okd.io/4.11/storage/persistent_storage/persistent-storage-iscsi.html>
- okd 4.5: <https://github.com/longhorn/longhorn/issues/1831#issuecomment-702690613>
- okd 4.6: <https://github.com/longhorn/longhorn/issues/1831#issuecomment-765884631>
- oauth-proxy: <https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml>
- <https://github.com/longhorn/longhorn/issues/1831>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,53 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: "longhorn-admin"
  labels:
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: [ "longhorn.io" ]
  resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
              "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
              "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
              "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
              "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
              "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
              "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
              "volumeattachments", "volumeattachments/status"]
  verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: "longhorn-edit"
  labels:
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups: [ "longhorn.io" ]
  resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
              "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
              "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
              "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
              "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
              "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
              "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
              "volumeattachments", "volumeattachments/status"]
  verbs: [ "*" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: "longhorn-view"
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ "longhorn.io" ]
  resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
              "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
              "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
              "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status",
              "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
              "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
              "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
              "volumeattachments", "volumeattachments/status"]
  verbs: [ "get", "list", "watch" ]

View File

@ -0,0 +1,35 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageDataSource" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}}
# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Backup" false -}}
# {{- set $found "longhorn.io/v1beta2/BackupBackingImage" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupTarget" false -}}
# {{- set $found "longhorn.io/v1beta1/BackupVolume" false -}}
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
# {{- set $found "longhorn.io/v1beta2/Orphan" false -}}
# {{- set $found "longhorn.io/v1beta1/RecurringJob" false -}}
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
# {{- set $found "longhorn.io/v1beta2/Snapshot" false -}}
# {{- set $found "longhorn.io/v1beta2/SupportBundle" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemBackup" false -}}
# {{- set $found "longhorn.io/v1beta2/SystemRestore" false -}}
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
# {{- set $found "longhorn.io/v1beta2/VolumeAttachment" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,18 @@
--- charts-original/Chart.yaml
+++ charts/Chart.yaml
@@ -1,3 +1,15 @@
+annotations:
+ catalog.cattle.io/auto-install: longhorn-crd=match
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/display-name: Longhorn
+ catalog.cattle.io/kube-version: '>= 1.21.0-0'
+ catalog.cattle.io/namespace: longhorn-system
+ catalog.cattle.io/permits-os: linux,windows
+ catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
+ catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
+ catalog.cattle.io/release-name: longhorn
+ catalog.cattle.io/type: cluster-tool
+ catalog.cattle.io/upstream-version: 1.6.2
apiVersion: v1
appVersion: v1.6.2
description: Longhorn is a distributed block storage system for Kubernetes.

View File

@ -0,0 +1,296 @@
--- charts-original/README.md
+++ charts/README.md
@@ -38,290 +38,13 @@
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
-## Installation
-
-1. Add Longhorn chart repository.
-```
-helm repo add longhorn https://charts.longhorn.io
-```
-
-2. Update local Longhorn chart information from chart repository.
-```
-helm repo update
-```
-
-3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart.
-
-```
-kubectl create namespace longhorn-system
-helm install longhorn longhorn/longhorn --namespace longhorn-system
-```
-
## Uninstallation
-```
-kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
-helm uninstall longhorn -n longhorn-system
-kubectl delete namespace longhorn-system
-```
-
-## Values
-
-The `values.yaml` contains items used to tweak a deployment of this chart.
-
-### Cattle Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| global.cattle.systemDefaultRegistry | string | `""` | Default system registry. |
-| global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector | string | `"kubernetes.io/os:linux"` | Node selector for system-managed Longhorn components. |
-| global.cattle.windowsCluster.defaultSetting.taintToleration | string | `"cattle.io/os=linux:NoSchedule"` | Toleration for system-managed Longhorn components. |
-| global.cattle.windowsCluster.enabled | bool | `false` | Setting that allows Longhorn to run on a Rancher Windows cluster. |
-| global.cattle.windowsCluster.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for Linux nodes that can run user-deployed Longhorn components. |
-| global.cattle.windowsCluster.tolerations | list | `[{"effect":"NoSchedule","key":"cattle.io/os","operator":"Equal","value":"linux"}]` | Toleration for Linux nodes that can run user-deployed Longhorn components. |
-
-### Network Policies
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| networkPolicies.enabled | bool | `false` | Setting that allows you to enable network policies that control access to Longhorn pods. |
-| networkPolicies.type | string | `"k3s"` | Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1") |
-
-### Image Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| image.csi.attacher.repository | string | `"longhornio/csi-attacher"` | Repository for the CSI attacher image. When unspecified, Longhorn uses the default value. |
-| image.csi.attacher.tag | string | `"v4.5.1"` | Tag for the CSI attacher image. When unspecified, Longhorn uses the default value. |
-| image.csi.livenessProbe.repository | string | `"longhornio/livenessprobe"` | Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value. |
-| image.csi.livenessProbe.tag | string | `"v2.12.0"` | Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value. |
-| image.csi.nodeDriverRegistrar.repository | string | `"longhornio/csi-node-driver-registrar"` | Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. |
-| image.csi.nodeDriverRegistrar.tag | string | `"v2.9.2"` | Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. |
-| image.csi.provisioner.repository | string | `"longhornio/csi-provisioner"` | Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value. |
-| image.csi.provisioner.tag | string | `"v3.6.4"` | Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value. |
-| image.csi.resizer.repository | string | `"longhornio/csi-resizer"` | Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value. |
-| image.csi.resizer.tag | string | `"v1.10.1"` | Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value. |
-| image.csi.snapshotter.repository | string | `"longhornio/csi-snapshotter"` | Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. |
-| image.csi.snapshotter.tag | string | `"v6.3.4"` | Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. |
-| image.longhorn.backingImageManager.repository | string | `"longhornio/backing-image-manager"` | Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value. |
-| image.longhorn.backingImageManager.tag | string | `"v1.6.1"` | Specify Longhorn backing image manager image tag |
-| image.longhorn.engine.repository | string | `"longhornio/longhorn-engine"` | Repository for the Longhorn Engine image. |
-| image.longhorn.engine.tag | string | `"v1.6.1"` | Specify Longhorn engine image tag |
-| image.longhorn.instanceManager.repository | string | `"longhornio/longhorn-instance-manager"` | Repository for the Longhorn Instance Manager image. |
-| image.longhorn.instanceManager.tag | string | `"v1.6.1"` | Specify Longhorn instance manager image tag |
-| image.longhorn.manager.repository | string | `"longhornio/longhorn-manager"` | Repository for the Longhorn Manager image. |
-| image.longhorn.manager.tag | string | `"v1.6.1"` | Specify Longhorn manager image tag |
-| image.longhorn.shareManager.repository | string | `"longhornio/longhorn-share-manager"` | Repository for the Longhorn Share Manager image. |
-| image.longhorn.shareManager.tag | string | `"v1.6.1"` | Specify Longhorn share manager image tag |
-| image.longhorn.supportBundleKit.repository | string | `"longhornio/support-bundle-kit"` | Repository for the Longhorn Support Bundle Manager image. |
-| image.longhorn.supportBundleKit.tag | string | `"v0.0.37"` | Tag for the Longhorn Support Bundle Manager image. |
-| image.longhorn.ui.repository | string | `"longhornio/longhorn-ui"` | Repository for the Longhorn UI image. |
-| image.longhorn.ui.tag | string | `"v1.6.1"` | Specify Longhorn ui image tag |
-| image.openshift.oauthProxy.repository | string | `"longhornio/openshift-origin-oauth-proxy"` | Repository for the OAuth Proxy image. This setting applies only to OpenShift users. |
-| image.openshift.oauthProxy.tag | float | `4.14` | Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14. |
-| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI. |
-
-### Service Settings
-
-| Key | Description |
-|-----|-------------|
-| service.manager.nodePort | NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767. |
-| service.manager.type | Service type for Longhorn Manager. |
-| service.ui.nodePort | NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767. |
-| service.ui.type | Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy") |
-
-### StorageClass Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| persistence.backingImage.dataSourceParameters | string | `nil` | Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) |
-| persistence.backingImage.dataSourceType | string | `nil` | Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type. |
-| persistence.backingImage.enable | bool | `false` | Setting that allows you to use a backing image in a Longhorn StorageClass. |
-| persistence.backingImage.expectedChecksum | string | `nil` | Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. |
-| persistence.backingImage.name | string | `nil` | Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image. |
-| persistence.defaultClass | bool | `true` | Setting that allows you to specify the default Longhorn StorageClass. |
-| persistence.defaultClassReplicaCount | int | `3` | Replica count of the default Longhorn StorageClass. |
-| persistence.defaultDataLocality | string | `"disabled"` | Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort") |
-| persistence.defaultFsType | string | `"ext4"` | Filesystem type of the default Longhorn StorageClass. |
-| persistence.defaultMkfsParams | string | `""` | mkfs parameters of the default Longhorn StorageClass. |
-| persistence.defaultNodeSelector.enable | bool | `false` | Setting that allows you to enable the node selector for the default Longhorn StorageClass. |
-| persistence.defaultNodeSelector.selector | string | `""` | Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast") |
-| persistence.migratable | bool | `false` | Setting that allows you to enable live migration of a Longhorn volume from one node to another. |
-| persistence.nfsOptions | string | `""` | Set NFS mount options for Longhorn StorageClass for RWX volumes |
-| persistence.reclaimPolicy | string | `"Delete"` | Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete") |
-| persistence.recurringJobSelector.enable | bool | `false` | Setting that allows you to enable the recurring job selector for a Longhorn StorageClass. |
-| persistence.recurringJobSelector.jobList | list | `[]` | Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`) |
-| persistence.removeSnapshotsDuringFilesystemTrim | string | `"ignored"` | Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled") |
-
-### CSI Settings
-
-| Key | Description |
-|-----|-------------|
-| csi.attacherReplicaCount | Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3"). |
-| csi.kubeletRootDir | kubelet root directory. When unspecified, Longhorn uses the default value. |
-| csi.provisionerReplicaCount | Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3"). |
-| csi.resizerReplicaCount | Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3"). |
-| csi.snapshotterReplicaCount | Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3"). |
-
-### Longhorn Manager Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornManager.log.format | string | `"plain"` | Format of Longhorn Manager logs. (Options: "plain", "json") |
-| longhornManager.nodeSelector | object | `{}` | Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager. |
-| longhornManager.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Manager. |
-| longhornManager.serviceAnnotations | object | `{}` | Annotation for the Longhorn Manager service. |
-| longhornManager.tolerations | list | `[]` | Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager. |
-
-### Longhorn Driver Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornDriver.nodeSelector | object | `{}` | Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver. |
-| longhornDriver.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Driver. |
-| longhornDriver.tolerations | list | `[]` | Toleration for Longhorn Driver on nodes allowed to run Longhorn components. |
-
-### Longhorn UI Settings
-
-Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI.
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| longhornUI.nodeSelector | object | `{}` | Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI. |
-| longhornUI.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn UI. |
-| longhornUI.replicas | int | `2` | Replica count for Longhorn UI. |
-| longhornUI.tolerations | list | `[]` | Toleration for Longhorn UI on nodes allowed to run Longhorn components. |
-
-### Ingress Settings
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| ingress.annotations | string | `nil` | Ingress annotations in the form of key-value pairs. |
-| ingress.enabled | bool | `false` | Setting that allows Longhorn to generate ingress records for the Longhorn UI service. |
-| ingress.host | string | `"sslip.io"` | Hostname of the Layer 7 load balancer. |
-| ingress.ingressClassName | string | `nil` | IngressClass resource that contains ingress configuration, including the name of the Ingress controller. ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases. |
-| ingress.path | string | `"/"` | Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}. |
-| ingress.secrets | string | `nil` | Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses. |
-| ingress.secureBackends | bool | `false` | Setting that allows you to enable secure connections to the Longhorn UI service via port 443. |
-| ingress.tls | bool | `false` | Setting that allows you to enable TLS on ingress records. |
-| ingress.tlsSecret | string | `"longhorn.local-tls"` | TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records. |
-
-### Private Registry Settings
-
-You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs).
-
-| Key | Description |
-|-----|-------------|
-| privateRegistry.createSecret | Setting that allows you to create a private registry secret. |
-| privateRegistry.registryPasswd | Password for authenticating with a private registry. |
-| privateRegistry.registrySecret | Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name. |
-| privateRegistry.registryUrl | URL of a private registry. When unspecified, Longhorn uses the default system registry. |
-| privateRegistry.registryUser | User account used for authenticating with a private registry. |
-
-### OS/Kubernetes Distro Settings
-
-#### OpenShift Settings
-
-For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md).
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| openshift.enabled | bool | `false` | Setting that allows Longhorn to integrate with OpenShift. |
-| openshift.ui.port | int | `443` | Port for accessing the OpenShift web console. |
-| openshift.ui.proxy | int | `8443` | Port for proxy that provides access to the OpenShift web console. |
-| openshift.ui.route | string | `"longhorn-ui"` | Route for connections between Longhorn and the OpenShift web console. |
-
-### Other Settings
-
-| Key | Default | Description |
-|-----|---------|-------------|
-| annotations | `{}` | Annotation for the Longhorn Manager DaemonSet pods. This setting is optional. |
-| enableGoCoverDir | `false` | Setting that allows Longhorn to generate code coverage profiles. |
-| enablePSP | `false` | Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled. |
-| metrics.serviceMonitor.enabled | `false` | Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components. |
-| namespaceOverride | `""` | Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`. |
-| preUpgradeChecker.jobEnabled | `true` | Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions. |
-| preUpgradeChecker.upgradeVersionCheck | `true` | Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled. |
-
-### System Default Settings
+To prevent Longhorn from being accidentally uninstalled (which leads to data lost), we introduce a new setting, deleting-confirmation-flag. If this flag is **false**, the Longhorn uninstallation job will fail. Set this flag to **true** to allow Longhorn uninstallation. You can set this flag using setting page in Longhorn UI or `kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag`
-During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs).
+To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc).
-| Key | Description |
-|-----|-------------|
-| defaultSettings.allowCollectingLonghornUsageMetrics | Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses. |
-| defaultSettings.allowEmptyDiskSelectorVolume | Setting that allows scheduling of empty disk selector volumes to any disk. |
-| defaultSettings.allowEmptyNodeSelectorVolume | Setting that allows scheduling of empty node selector volumes to any node. |
-| defaultSettings.allowRecurringJobWhileVolumeDetached | Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run. |
-| defaultSettings.allowVolumeCreationWithDegradedAvailability | Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation. |
-| defaultSettings.autoCleanupRecurringJobBackupSnapshot | Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job. |
-| defaultSettings.autoCleanupSystemGeneratedSnapshot | Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed. |
-| defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly | Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting. |
-| defaultSettings.autoSalvage | Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default. |
-| defaultSettings.backingImageCleanupWaitInterval | Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it. |
-| defaultSettings.backingImageRecoveryWaitInterval | Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown". |
-| defaultSettings.backupCompressionMethod | Setting that allows you to specify a backup compression method. |
-| defaultSettings.backupConcurrentLimit | Maximum number of worker threads that can concurrently run for each backup. |
-| defaultSettings.backupTarget | Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE") |
-| defaultSettings.backupTargetCredentialSecret | Name of the Kubernetes secret associated with the backup target. |
-| defaultSettings.backupstorePollInterval | Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled. |
-| defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit | Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version. |
-| defaultSettings.concurrentReplicaRebuildPerNodeLimit | Maximum number of replicas that can be concurrently rebuilt on each node. |
-| defaultSettings.concurrentVolumeBackupRestorePerNodeLimit | Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled. |
-| defaultSettings.createDefaultDiskLabeledNodes | Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster. |
-| defaultSettings.defaultDataLocality | Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume. |
-| defaultSettings.defaultDataPath | Default path for storing data on a host. The default value is "/var/lib/longhorn/". |
-| defaultSettings.defaultLonghornStaticStorageClass | Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static". |
-| defaultSettings.defaultReplicaCount | Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3". |
-| defaultSettings.deletingConfirmationFlag | Flag that prevents accidental uninstallation of Longhorn. |
-| defaultSettings.detachManuallyAttachedVolumesWhenCordoned | Setting that allows automatic detaching of manually-attached volumes when a node is cordoned. |
-| defaultSettings.disableRevisionCounter | Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI. |
-| defaultSettings.disableSchedulingOnCordonedNode | Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default. |
-| defaultSettings.disableSnapshotPurge | Setting that temporarily prevents all attempts to purge volume snapshots. |
-| defaultSettings.engineReplicaTimeout | Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8". |
-| defaultSettings.failedBackupTTL | Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled. |
-| defaultSettings.fastReplicaRebuildEnabled | Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check". |
-| defaultSettings.guaranteedInstanceManagerCPU | Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12". |
-| defaultSettings.kubernetesClusterAutoscalerEnabled | Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. |
-| defaultSettings.logLevel | Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace") |
-| defaultSettings.nodeDownPodDeletionPolicy | Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed. |
-| defaultSettings.nodeDrainPolicy | Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained. |
-| defaultSettings.offlineReplicaRebuilding | Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine. |
-| defaultSettings.orphanAutoDeletion | Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up. |
-| defaultSettings.priorityClass | PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. |
-| defaultSettings.recurringFailedJobsHistoryLimit | Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained. |
-| defaultSettings.recurringJobMaxRetention | Maximum number of snapshots or backups to be retained. |
-| defaultSettings.recurringSuccessfulJobsHistoryLimit | Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained. |
-| defaultSettings.removeSnapshotsDuringFilesystemTrim | Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files. |
-| defaultSettings.replicaAutoBalance | Setting that automatically rebalances replicas when an available node is discovered. |
-| defaultSettings.replicaDiskSoftAntiAffinity | Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default. |
-| defaultSettings.replicaFileSyncHttpClientTimeout | Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed. |
-| defaultSettings.replicaReplenishmentWaitInterval | Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume. |
-| defaultSettings.replicaSoftAntiAffinity | Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default. |
-| defaultSettings.replicaZoneSoftAntiAffinity | Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object. |
-| defaultSettings.restoreConcurrentLimit | Maximum number of worker threads that can concurrently run for each restore operation. |
-| defaultSettings.restoreVolumeRecurringJobs | Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration. |
-| defaultSettings.snapshotDataIntegrity | Setting that allows you to enable and disable snapshot hashing and data integrity checks. |
-| defaultSettings.snapshotDataIntegrityCronjob | Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format. |
-| defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation | Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance. |
-| defaultSettings.snapshotMaxCount | Maximum snapshot count for a volume. The value should be between 2 to 250 |
-| defaultSettings.storageMinimalAvailablePercentage | Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25". |
-| defaultSettings.storageNetwork | Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network. |
-| defaultSettings.storageOverProvisioningPercentage | Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100". |
-| defaultSettings.storageReservedPercentageForDefaultDisk | Percentage of disk space that is not allocated to the default disk on each new Longhorn node. |
-| defaultSettings.supportBundleFailedHistoryLimit | Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles. |
-| defaultSettings.systemManagedComponentsNodeSelector | Node selector for system-managed Longhorn components. |
-| defaultSettings.systemManagedPodsImagePullPolicy | Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart. |
-| defaultSettings.taintToleration | Taint or toleration for system-managed Longhorn components. Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect). |
-| defaultSettings.upgradeChecker | Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default |
-| defaultSettings.v1DataEngine | Setting that allows you to enable the V1 Data Engine. |
-| defaultSettings.v2DataEngine | Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. |
-| defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU | Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". |
-| defaultSettings.v2DataEngineHugepageLimit | Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine. |
+From Rancher Cluster Explorer UI, navigate to Apps page, delete app `longhorn` then app `longhorn-crd` in Installed Apps tab.
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

View File

@ -0,0 +1,23 @@
--- charts-original/app-readme.md
+++ charts/app-readme.md
@@ -9,3 +9,19 @@
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
+
+
+## Upgrading to Kubernetes v1.25+
+
+Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
+
+As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
+
+> **Note:**
+> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
+>
+> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
+
+Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
+
+As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
\ No newline at end of file

View File

@ -0,0 +1,128 @@
--- charts-original/questions.yaml
+++ charts/questions.yaml
@@ -11,7 +11,7 @@
group: "Longhorn Images"
subquestions:
- variable: image.longhorn.manager.repository
- default: longhornio/longhorn-manager
+ default: rancher/mirrored-longhornio-longhorn-manager
description: "Repository for the Longhorn Manager image."
type: string
label: Longhorn Manager Image Repository
@@ -23,7 +23,7 @@
label: Longhorn Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.repository
- default: longhornio/longhorn-engine
+ default: rancher/mirrored-longhornio-longhorn-engine
description: "Repository for the Longhorn Engine image."
type: string
label: Longhorn Engine Image Repository
@@ -35,7 +35,7 @@
label: Longhorn Engine Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.repository
- default: longhornio/longhorn-ui
+ default: rancher/mirrored-longhornio-longhorn-ui
description: "Repository for the Longhorn UI image."
type: string
label: Longhorn UI Image Repository
@@ -47,7 +47,7 @@
label: Longhorn UI Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.repository
- default: longhornio/longhorn-instance-manager
+ default: rancher/mirrored-longhornio-longhorn-instance-manager
description: "Repository for the Longhorn Instance Manager image."
type: string
label: Longhorn Instance Manager Image Repository
@@ -59,7 +59,7 @@
label: Longhorn Instance Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.repository
- default: longhornio/longhorn-share-manager
+ default: rancher/mirrored-longhornio-longhorn-share-manager
description: "Repository for the Longhorn Share Manager image."
type: string
label: Longhorn Share Manager Image Repository
@@ -71,7 +71,7 @@
label: Longhorn Share Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.repository
- default: longhornio/backing-image-manager
+ default: rancher/mirrored-longhornio-backing-image-manager
description: "Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn Backing Image Manager Image Repository
@@ -83,7 +83,7 @@
label: Longhorn Backing Image Manager Image Tag
group: "Longhorn Images Settings"
- variable: image.longhorn.supportBundleKit.repository
- default: longhornio/support-bundle-kit
+ default: rancher/mirrored-longhornio-support-bundle-kit
description: "Repository for the Longhorn Support Bundle Manager image."
type: string
label: Longhorn Support Bundle Kit Image Repository
@@ -95,7 +95,7 @@
label: Longhorn Support Bundle Kit Image Tag
group: "Longhorn Images Settings"
- variable: image.csi.attacher.repository
- default: longhornio/csi-attacher
+ default: rancher/mirrored-longhornio-csi-attacher
description: "Repository for the CSI attacher image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Attacher Image Repository
@@ -107,7 +107,7 @@
label: Longhorn CSI Attacher Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.provisioner.repository
- default: longhornio/csi-provisioner
+ default: rancher/mirrored-longhornio-csi-provisioner
description: "Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Provisioner Image Repository
@@ -119,7 +119,7 @@
label: Longhorn CSI Provisioner Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.repository
- default: longhornio/csi-node-driver-registrar
+ default: rancher/mirrored-longhornio-csi-node-driver-registrar
description: "Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Node Driver Registrar Image Repository
@@ -131,7 +131,7 @@
label: Longhorn CSI Node Driver Registrar Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.resizer.repository
- default: longhornio/csi-resizer
+ default: rancher/mirrored-longhornio-csi-resizer
description: "Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Resizer Image Repository
@@ -143,7 +143,7 @@
label: Longhorn CSI Driver Resizer Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.snapshotter.repository
- default: longhornio/csi-snapshotter
+ default: rancher/mirrored-longhornio-csi-snapshotter
description: "Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Driver Snapshotter Image Repository
@@ -155,7 +155,7 @@
label: Longhorn CSI Driver Snapshotter Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.repository
- default: longhornio/livenessprobe
+ default: rancher/mirrored-longhornio-livenessprobe
description: "Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value."
type: string
label: Longhorn CSI Liveness Probe Image Repository
@@ -167,7 +167,7 @@
label: Longhorn CSI Liveness Probe Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.openshift.oauthProxy.repository
- default: longhornio/openshift-origin-oauth-proxy
+ default: rancher/mirrored-longhornio-openshift-origin-oauth-proxy
description: "Repository for the OAuth Proxy image. This setting applies only to OpenShift users"
type: string
label: OpenShift OAuth Proxy Image Repository

View File

@ -0,0 +1,91 @@
--- charts-original/values.yaml
+++ charts/values.yaml
@@ -33,74 +33,74 @@
longhorn:
engine:
# -- Repository for the Longhorn Engine image.
- repository: longhornio/longhorn-engine
+ repository: rancher/mirrored-longhornio-longhorn-engine
# -- Specify Longhorn engine image tag
tag: v1.6.2
manager:
# -- Repository for the Longhorn Manager image.
- repository: longhornio/longhorn-manager
+ repository: rancher/mirrored-longhornio-longhorn-manager
# -- Specify Longhorn manager image tag
tag: v1.6.2
ui:
# -- Repository for the Longhorn UI image.
- repository: longhornio/longhorn-ui
+ repository: rancher/mirrored-longhornio-longhorn-ui
# -- Specify Longhorn ui image tag
tag: v1.6.2
instanceManager:
# -- Repository for the Longhorn Instance Manager image.
- repository: longhornio/longhorn-instance-manager
+ repository: rancher/mirrored-longhornio-longhorn-instance-manager
# -- Specify Longhorn instance manager image tag
tag: v1.6.2
shareManager:
# -- Repository for the Longhorn Share Manager image.
- repository: longhornio/longhorn-share-manager
+ repository: rancher/mirrored-longhornio-longhorn-share-manager
# -- Specify Longhorn share manager image tag
tag: v1.6.2
backingImageManager:
# -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
- repository: longhornio/backing-image-manager
+ repository: rancher/mirrored-longhornio-backing-image-manager
# -- Specify Longhorn backing image manager image tag
tag: v1.6.2
supportBundleKit:
# -- Repository for the Longhorn Support Bundle Manager image.
- repository: longhornio/support-bundle-kit
+ repository: rancher/mirrored-longhornio-support-bundle-kit
# -- Tag for the Longhorn Support Bundle Manager image.
tag: v0.0.37
csi:
attacher:
# -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
- repository: longhornio/csi-attacher
+ repository: rancher/mirrored-longhornio-csi-attacher
# -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
tag: v4.5.1
provisioner:
# -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
- repository: longhornio/csi-provisioner
+ repository: rancher/mirrored-longhornio-csi-provisioner
# -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
tag: v3.6.4
nodeDriverRegistrar:
# -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
- repository: longhornio/csi-node-driver-registrar
+ repository: rancher/mirrored-longhornio-csi-node-driver-registrar
# -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
tag: v2.9.2
resizer:
# -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
- repository: longhornio/csi-resizer
+ repository: rancher/mirrored-longhornio-csi-resizer
# -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
tag: v1.10.1
snapshotter:
# -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
- repository: longhornio/csi-snapshotter
+ repository: rancher/mirrored-longhornio-csi-snapshotter
# -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
tag: v6.3.4
livenessProbe:
# -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
- repository: longhornio/livenessprobe
+ repository: rancher/mirrored-longhornio-livenessprobe
# -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
tag: v2.12.0
openshift:
oauthProxy:
# -- Repository for the OAuth Proxy image. This setting applies only to OpenShift users.
- repository: longhornio/openshift-origin-oauth-proxy
+ repository: rancher/mirrored-longhornio-openshift-origin-oauth-proxy
# -- Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14.
tag: 4.14
# -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.

View File

@ -0,0 +1,5 @@
url: https://github.com/longhorn/charts.git
subdirectory: charts/longhorn
commit: ad73dc01239b7eeb25ff510ce8358578433d85a5
version: 104.1.0
doNotRelease: false

View File

@ -48,6 +48,7 @@ longhorn:
- 104.0.0+up1.5.5
- 102.4.1+up1.6.2
- 103.3.1+up1.6.2
- 104.1.0+up1.6.2
longhorn-crd:
- 102.3.1+up1.5.3
- 102.3.2+up1.5.4
@ -59,6 +60,7 @@ longhorn-crd:
- 104.0.0+up1.5.5
- 102.4.1+up1.6.2
- 103.3.1+up1.6.2
- 104.1.0+up1.6.2
harvester-cloud-provider:
- 104.0.0+up0.2.3
- 102.0.2+up0.2.3