categories: - storage namespace: longhorn-system questions: - variable: image.defaultImage default: "true" description: "Use default Longhorn images" label: Use Default Images type: boolean show_subquestion_if: false group: "Longhorn Images" subquestions: - variable: image.longhorn.manager.repository default: rancher/mirrored-longhornio-longhorn-manager description: "Specify Longhorn Manager Image Repository" type: string label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag default: v1.5.1 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.engine.repository default: rancher/mirrored-longhornio-longhorn-engine description: "Specify Longhorn Engine Image Repository" type: string label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag default: v1.5.1 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.ui.repository default: rancher/mirrored-longhornio-longhorn-ui description: "Specify Longhorn UI Image Repository" type: string label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag default: v1.5.1 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.instanceManager.repository default: rancher/mirrored-longhornio-longhorn-instance-manager description: "Specify Longhorn Instance Manager Image Repository" type: string label: Longhorn Instance Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.instanceManager.tag default: v1.5.1 description: "Specify Longhorn Instance Manager Image Tag" type: string label: Longhorn Instance Manager Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.shareManager.repository default: rancher/mirrored-longhornio-longhorn-share-manager description: "Specify Longhorn Share Manager Image Repository" type: string label: Longhorn Share Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.shareManager.tag default: v1.5.1 description: "Specify Longhorn Share Manager Image Tag" type: string label: Longhorn Share Manager Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.backingImageManager.repository default: rancher/mirrored-longhornio-backing-image-manager description: "Specify Longhorn Backing Image Manager Image Repository" type: string label: Longhorn Backing Image Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.backingImageManager.tag default: v1.5.1 description: "Specify Longhorn Backing Image Manager Image Tag" type: string label: Longhorn Backing Image Manager Image Tag group: "Longhorn Images Settings" - variable: image.longhorn.supportBundleKit.repository default: rancher/mirrored-longhornio-support-bundle-kit description: "Specify Longhorn Support Bundle Manager Image Repository" type: string label: Longhorn Support Bundle Kit Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.supportBundleKit.tag default: v0.0.25 description: "Specify Longhorn Support Bundle Manager Image Tag" type: string label: Longhorn Support Bundle Kit Image Tag group: "Longhorn Images Settings" - variable: image.csi.attacher.repository default: rancher/mirrored-longhornio-csi-attacher description: "Specify CSI attacher image repository. Leave blank to autodetect." type: string label: Longhorn CSI Attacher Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.attacher.tag default: v4.2.0 description: "Specify CSI attacher image tag. Leave blank to autodetect." type: string label: Longhorn CSI Attacher Image Tag group: "Longhorn CSI Driver Images" - variable: image.csi.provisioner.repository default: rancher/mirrored-longhornio-csi-provisioner description: "Specify CSI provisioner image repository. Leave blank to autodetect." type: string label: Longhorn CSI Provisioner Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.provisioner.tag default: v3.4.1 description: "Specify CSI provisioner image tag. Leave blank to autodetect." type: string label: Longhorn CSI Provisioner Image Tag group: "Longhorn CSI Driver Images" - variable: image.csi.nodeDriverRegistrar.repository default: rancher/mirrored-longhornio-csi-node-driver-registrar description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect." type: string label: Longhorn CSI Node Driver Registrar Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.nodeDriverRegistrar.tag default: v2.7.0 description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect." type: string label: Longhorn CSI Node Driver Registrar Image Tag group: "Longhorn CSI Driver Images" - variable: image.csi.resizer.repository default: rancher/mirrored-longhornio-csi-resizer description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect." type: string label: Longhorn CSI Driver Resizer Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.resizer.tag default: v1.7.0 description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect." type: string label: Longhorn CSI Driver Resizer Image Tag group: "Longhorn CSI Driver Images" - variable: image.csi.snapshotter.repository default: rancher/mirrored-longhornio-csi-snapshotter description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect." type: string label: Longhorn CSI Driver Snapshotter Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.snapshotter.tag default: v6.2.1 description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect." type: string label: Longhorn CSI Driver Snapshotter Image Tag group: "Longhorn CSI Driver Images" - variable: image.csi.livenessProbe.repository default: rancher/mirrored-longhornio-livenessprobe description: "Specify CSI liveness probe image repository. Leave blank to autodetect." type: string label: Longhorn CSI Liveness Probe Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.livenessProbe.tag default: v2.9.0 description: "Specify CSI liveness probe image tag. Leave blank to autodetect." type: string label: Longhorn CSI Liveness Probe Image Tag group: "Longhorn CSI Driver Images" - variable: privateRegistry.registryUrl label: Private registry URL description: "URL of private registry. Leave blank to apply system default registry." group: "Private Registry Settings" type: string default: "" - variable: privateRegistry.registrySecret label: Private registry secret name description: "If create a new private registry secret is true, create a Kubernetes secret with this name; else use the existing secret of this name. Use it to pull images from your private registry." group: "Private Registry Settings" type: string default: "" - variable: privateRegistry.createSecret default: "true" description: "Create a new private registry secret" type: boolean group: "Private Registry Settings" label: Create Secret for Private Registry Settings show_subquestion_if: true subquestions: - variable: privateRegistry.registryUser label: Private registry user description: "User used to authenticate to private registry." type: string default: "" - variable: privateRegistry.registryPasswd label: Private registry password description: "Password used to authenticate to private registry." type: password default: "" - variable: longhorn.default_setting default: "false" description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn." label: "Customize Default Settings" type: boolean show_subquestion_if: true group: "Longhorn Default Settings" subquestions: - variable: csi.kubeletRootDir default: description: "Specify kubelet root-dir. Leave blank to autodetect." type: string label: Kubelet Root Directory group: "Longhorn CSI Driver Settings" - variable: csi.attacherReplicaCount type: int default: 3 min: 1 max: 10 description: "Specify replica count of CSI Attacher. By default 3." label: Longhorn CSI Attacher replica count group: "Longhorn CSI Driver Settings" - variable: csi.provisionerReplicaCount type: int default: 3 min: 1 max: 10 description: "Specify replica count of CSI Provisioner. By default 3." label: Longhorn CSI Provisioner replica count group: "Longhorn CSI Driver Settings" - variable: csi.resizerReplicaCount type: int default: 3 min: 1 max: 10 description: "Specify replica count of CSI Resizer. By default 3." label: Longhorn CSI Resizer replica count group: "Longhorn CSI Driver Settings" - variable: csi.snapshotterReplicaCount type: int default: 3 min: 1 max: 10 description: "Specify replica count of CSI Snapshotter. By default 3." label: Longhorn CSI Snapshotter replica count group: "Longhorn CSI Driver Settings" - variable: defaultSettings.backupTarget label: Backup Target description: "The endpoint used to access the backupstore. NFS and S3 are supported." group: "Longhorn Default Settings" type: string default: - variable: defaultSettings.backupTargetCredentialSecret label: Backup Target Credential Secret description: "The name of the Kubernetes secret associated with the backup target." group: "Longhorn Default Settings" type: string default: - variable: defaultSettings.allowRecurringJobWhileVolumeDetached label: Allow Recurring Job While Volume Is Detached description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup. Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.' group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.createDefaultDiskLabeledNodes label: Create Default Disk on Labeled Nodes description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.' group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.defaultDataPath label: Default Data Path description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"' group: "Longhorn Default Settings" type: string default: "/var/lib/longhorn/" - variable: defaultSettings.defaultDataLocality label: Default Data Locality description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume. This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass The available modes are: - **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload) - **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.' group: "Longhorn Default Settings" type: enum options: - "disabled" - "best-effort" default: "disabled" - variable: defaultSettings.replicaSoftAntiAffinity label: Replica Node Level Soft Anti-Affinity description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.' group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.replicaAutoBalance label: Replica Auto Balance description: 'Enable this setting automatically rebalances replicas when discovered an available node. The available global options are: - **disabled**. This is the default option. No replica auto-balance will be done. - **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy. - **best-effort**. This option instructs Longhorn to balance replicas for even redundancy. Longhorn also support individual volume setting. The setting can be specified in volume.spec.replicaAutoBalance, this overrules the global setting. The available volume spec options are: - **ignored**. This is the default option that instructs Longhorn to inherit from the global setting. - **disabled**. This option instructs Longhorn no replica auto-balance should be done. - **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy. - **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.' group: "Longhorn Default Settings" type: enum options: - "disabled" - "least-effort" - "best-effort" default: "disabled" - variable: defaultSettings.storageOverProvisioningPercentage label: Storage Over Provisioning Percentage description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200." group: "Longhorn Default Settings" type: int min: 0 default: 200 - variable: defaultSettings.storageMinimalAvailablePercentage label: Storage Minimal Available Percentage description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25." group: "Longhorn Default Settings" type: int min: 0 max: 100 default: 25 - variable: defaultSettings.upgradeChecker label: Enable Upgrade Checker description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.' group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.defaultReplicaCount label: Default Replica Count description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3." group: "Longhorn Default Settings" type: int min: 1 max: 20 default: 3 - variable: defaultSettings.defaultLonghornStaticStorageClass label: Default Longhorn Static StorageClass Name description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'." group: "Longhorn Default Settings" type: string default: "longhorn-static" - variable: defaultSettings.backupstorePollInterval label: Backupstore Poll Interval description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300." group: "Longhorn Default Settings" type: int min: 0 default: 300 - variable: defaultSettings.failedBackupTTL label: Failed Backup Time to Live description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion. Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting. Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**. Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion." group: "Longhorn Default Settings" type: int min: 0 default: 1440 - variable: defaultSettings.restoreVolumeRecurringJobs label: Restore Volume Recurring Jobs description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration. Longhorn also supports individual volume setting. The setting can be specified on Backup page when making a backup restoration, this overrules the global setting. The available volume setting options are: - **ignored**. This is the default option that instructs Longhorn to inherit from the global setting. - **enabled**. This option instructs Longhorn to restore recurring jobs/groups from the backup target forcibly. - **disabled**. This option instructs Longhorn no restoring recurring jobs/groups should be done." group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.recurringSuccessfulJobsHistoryLimit label: Cronjob Successful Jobs History Limit description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0." group: "Longhorn Default Settings" type: int min: 0 default: 1 - variable: defaultSettings.recurringFailedJobsHistoryLimit label: Cronjob Failed Jobs History Limit description: "This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0." group: "Longhorn Default Settings" type: int min: 0 default: 1 - variable: defaultSettings.supportBundleFailedHistoryLimit label: SupportBundle Failed History Limit description: "This setting specifies how many failed support bundles can exist in the cluster. The retained failed support bundle is for analysis purposes and needs to clean up manually. Set this value to **0** to have Longhorn automatically purge all failed support bundles." group: "Longhorn Default Settings" type: int min: 0 default: 1 - variable: defaultSettings.autoSalvage label: Automatic salvage description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true." group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount. If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume. **Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.' group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.disableSchedulingOnCordonedNode label: Disable Scheduling On Cordoned Node description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true." group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.replicaZoneSoftAntiAffinity label: Replica Zone Level Soft Anti-Affinity description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=` in the Kubernetes node object to identify the zone. By default true." group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.nodeDownPodDeletionPolicy label: Pod Deletion Policy When Node is Down description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down. - **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down. - **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. - **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. - **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods." group: "Longhorn Default Settings" type: enum options: - "do-nothing" - "delete-statefulset-pod" - "delete-deployment-pod" - "delete-both-statefulset-and-deployment-pod" default: "do-nothing" - variable: defaultSettings.nodeDrainPolicy label: Node Drain Policy description: "Define the policy to use when a node with the last healthy replica of a volume is drained. - **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume. - **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance. - **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining." group: "Longhorn Default Settings" type: enum options: - "block-if-contains-last-replica" - "allow-if-replica-is-stopped" - "always-allow" default: "block-if-contains-last-replica" - variable: defaultSettings.replicaReplenishmentWaitInterval label: Replica Replenishment Wait Interval description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume. Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case." group: "Longhorn Default Settings" type: int min: 0 default: 600 - variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit label: Concurrent Replica Rebuild Per Node Limit description: "This setting controls how many replicas on a node can be rebuilt simultaneously. Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding. WARNING: - The old setting \"Disable Replica Rebuild\" is replaced by this setting. - Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped. - When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore." group: "Longhorn Default Settings" type: int min: 0 default: 5 - variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit label: Concurrent Volume Backup Restore Per Node Limit description: "This setting controls how many volumes on a node can restore the backup concurrently. Longhorn blocks the backup restore once the restoring volume count exceeds the limit. Set the value to **0** to disable backup restore." group: "Longhorn Default Settings" type: int min: 0 default: 5 - variable: defaultSettings.disableRevisionCounter label: Disable Revision Counter description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the replica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume." group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.systemManagedPodsImagePullPolicy label: System Managed Pod Image Pull Policy description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart." group: "Longhorn Default Settings" type: enum options: - "if-not-present" - "always" - "never" default: "if-not-present" - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability label: Allow Volume Creation with Degraded Availability description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation." group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot label: Automatically Cleanup System Generated Snapshot description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done." group: "Longhorn Default Settings" type: boolean default: "true" - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit label: Concurrent Automatic Engine Upgrade Per Node Limit description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version." group: "Longhorn Default Settings" type: int min: 0 default: 0 - variable: defaultSettings.backingImageCleanupWaitInterval label: Backing Image Cleanup Wait Interval description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it." group: "Longhorn Default Settings" type: int min: 0 default: 60 - variable: defaultSettings.backingImageRecoveryWaitInterval label: Backing Image Recovery Wait Interval description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown. WARNING: - This recovery only works for the backing image of which the creation type is \"download\". - File state \"unknown\" means the related manager pods on the pod is not running or the node itself is down/disconnected." group: "Longhorn Default Settings" type: int min: 0 default: 300 - variable: defaultSettings.guaranteedEngineManagerCPU label: Guaranteed Engine Manager CPU description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload. In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100. The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. WARNING: - Value 0 means unsetting CPU requests for engine manager pods. - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40. - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set. - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." group: "Longhorn Default Settings" type: int min: 0 max: 40 default: 12 - variable: defaultSettings.guaranteedReplicaManagerCPU label: Guaranteed Replica Manager CPU description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload. In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100. The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. WARNING: - Value 0 means unsetting CPU requests for replica manager pods. - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40. - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set. - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." group: "Longhorn Default Settings" type: int min: 0 max: 40 default: 12 - variable: defaultSettings.logLevel label: Log Level description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. By default Debug." group: "Longhorn Default Settings" type: string default: "Info" - variable: defaultSettings.kubernetesClusterAutoscalerEnabled label: Kubernetes Cluster Autoscaler Enabled (Experimental) description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler. Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that met all conditions: - No volume attached to the node. - Is not the last node containing the replica of any volume. - Is not running backing image components pod. - Is not running share manager components pod." group: "Longhorn Default Settings" type: boolean default: false - variable: defaultSettings.orphanAutoDeletion label: Orphaned Data Cleanup description: "This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically like stale replicas. Orphan resources on down or unknown nodes will not be cleaned up automatically." group: "Longhorn Default Settings" type: boolean default: false - variable: defaultSettings.storageNetwork label: Storage Network description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network. To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in \"/\" format. WARNING: - The cluster must have pre-existing Multus installed, and NetworkAttachmentDefinition IPs are reachable between nodes. - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes. - When applying the setting, Longhorn will restart all manager, instance-manager, and backing-image-manager pods." group: "Longhorn Default Settings" type: string default: - variable: defaultSettings.deletingConfirmationFlag label: Deleting Confirmation Flag description: "This flag is designed to prevent Longhorn from being accidentally uninstalled which will lead to data lost. Set this flag to **true** to allow Longhorn uninstallation. If this flag **false**, Longhorn uninstallation job will fail. " group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.engineReplicaTimeout label: Timeout between Engine and Replica description: "In seconds. The setting specifies the timeout between the engine and replica(s), and the value should be between 8 to 30 seconds. The default value is 8 seconds." group: "Longhorn Default Settings" type: int default: "8" - variable: defaultSettings.snapshotDataIntegrity label: Snapshot Data Integrity description: "This setting allows users to enable or disable snapshot hashing and data integrity checking. Available options are - **disabled**: Disable snapshot disk file hashing and data integrity checking. - **enabled**: Enables periodic snapshot disk file hashing and data integrity checking. To detect the filesystem-unaware corruption caused by bit rot or other issues in snapshot disk files, Longhorn system periodically hashes files and finds corrupted ones. Hence, the system performance will be impacted during the periodical checking. - **fast-check**: Enable snapshot disk file hashing and fast data integrity checking. Longhorn system only hashes snapshot disk files if their are not hashed or the modification time are changed. In this mode, filesystem-unaware corruption cannot be detected, but the impact on system performance can be minimized." group: "Longhorn Default Settings" type: string default: "disabled" - variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation label: Immediate Snapshot Data Integrity Check After Creating a Snapshot description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot." group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.snapshotDataIntegrityCronjob label: Snapshot Data Integrity Check CronJob description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files. Warning: Hashing snapshot disk files impacts the performance of the system. It is recommended to run data integrity checks during off-peak times and to reduce the frequency of checks." group: "Longhorn Default Settings" type: string default: "0 0 */7 * *" - variable: defaultSettings.removeSnapshotsDuringFilesystemTrim label: Remove Snapshots During Filesystem Trim description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children.\n\n Since Longhorn filesystem trim feature can be applied to the volume head and the followed continuous removed or system snapshots only.\n\n Notice that trying to trim a removed files from a valid snapshot will do nothing but the filesystem will discard this kind of in-memory trimmable file info.\n\n Later on if you mark the snapshot as removed and want to retry the trim, you may need to unmount and remount the filesystem so that the filesystem can recollect the trimmable file info." group: "Longhorn Default Settings" type: boolean default: "false" - variable: defaultSettings.fastReplicaRebuildEnabled label: Fast Replica Rebuild Enabled description: "This feature supports the fast replica rebuilding. It relies on the checksum of snapshot disk files, so setting the snapshot-data-integrity to **enable** or **fast-check** is a prerequisite." group: "Longhorn Default Settings" type: boolean default: false - variable: defaultSettings.replicaFileSyncHttpClientTimeout label: Timeout of HTTP Client to Replica File Sync Server description: "In seconds. The setting specifies the HTTP client timeout to the file sync server." group: "Longhorn Default Settings" type: int default: "30" - variable: defaultSettings.backupCompressionMethod label: Backup Compression Method description: "This setting allows users to specify backup compression method. Available options are - **none**: Disable the compression method. Suitable for multimedia data such as encoded images and videos. - **lz4**: Fast compression method. Suitable for flat files. - **gzip**: A bit of higher compression ratio but relatively slow." group: "Longhorn Default Settings" type: string default: "lz4" - variable: defaultSettings.backupConcurrentLimit label: Backup Concurrent Limit Per Backup description: "This setting controls how many worker threads per backup concurrently." group: "Longhorn Default Settings" type: int min: 1 default: 2 - variable: defaultSettings.restoreConcurrentLimit label: Restore Concurrent Limit Per Backup description: "This setting controls how many worker threads per restore concurrently." group: "Longhorn Default Settings" type: int min: 1 default: 2 - variable: defaultSettings.v2DataEngine label: V2 Data Engine description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment. WARNING: - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes. - When applying the setting, Longhorn will restart all instance-manager pods. - When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations." group: "Longhorn V2 Data Engine (Preview Feature) Settings" type: boolean default: false - variable: defaultSettings.offlineReplicaRebuilding label: Offline Replica Rebuilding description: "This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine." group: "Longhorn V2 Data Engine (Preview Feature) Settings" required: true type: enum options: - "enabled" - "disabled" default: "enabled" - variable: persistence.defaultClass default: "true" description: "Set as default StorageClass for Longhorn" label: Default Storage Class group: "Longhorn Storage Class Settings" required: true type: boolean - variable: persistence.reclaimPolicy label: Storage Class Retain Policy description: "Define reclaim policy (Retain or Delete)" group: "Longhorn Storage Class Settings" required: true type: enum options: - "Delete" - "Retain" default: "Delete" - variable: persistence.defaultClassReplicaCount description: "Set replica count for Longhorn StorageClass" label: Default Storage Class Replica Count group: "Longhorn Storage Class Settings" type: int min: 1 max: 10 default: 3 - variable: persistence.defaultDataLocality description: "Set data locality for Longhorn StorageClass" label: Default Storage Class Data Locality group: "Longhorn Storage Class Settings" type: enum options: - "disabled" - "best-effort" default: "disabled" - variable: persistence.recurringJobSelector.enable description: "Enable recurring job selector for Longhorn StorageClass" group: "Longhorn Storage Class Settings" label: Enable Storage Class Recurring Job Selector type: boolean default: false show_subquestion_if: true subquestions: - variable: persistence.recurringJobSelector.jobList description: 'Recurring job selector list for Longhorn StorageClass. Please be careful of quotes of input. e.g., [{"name":"backup", "isGroup":true}]' label: Storage Class Recurring Job Selector List group: "Longhorn Storage Class Settings" type: string default: - variable: defaultSettings.defaultNodeSelector.enable description: "Enable recurring Node selector for Longhorn StorageClass" group: "Longhorn Storage Class Settings" label: Enable Storage Class Node Selector type: boolean default: false show_subquestion_if: true subquestions: - variable: defaultSettings.defaultNodeSelector.selector label: Storage Class Node Selector description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged whith its value' group: "Longhorn Default Settings" type: string default: - variable: persistence.backingImage.enable description: "Set backing image for Longhorn StorageClass" group: "Longhorn Storage Class Settings" label: Default Storage Class Backing Image type: boolean default: false show_subquestion_if: true subquestions: - variable: persistence.backingImage.name description: 'Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If not exists, the backing image data source type and backing image data source parameters should be specified so that Longhorn will create the backing image before using it.' label: Storage Class Backing Image Name group: "Longhorn Storage Class Settings" type: string default: - variable: persistence.backingImage.expectedChecksum description: 'Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass. WARNING: - If the backing image name is not specified, setting this field is meaningless. - It is not recommended to set this field if the data source type is \"export-from-volume\".' label: Storage Class Backing Image Expected SHA512 Checksum group: "Longhorn Storage Class Settings" type: string default: - variable: persistence.backingImage.dataSourceType description: 'Specify the data source type for the backing image used in Longhorn StorageClass. If the backing image does not exists, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image. WARNING: - If the backing image name is not specified, setting this field is meaningless. - As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.' label: Storage Class Backing Image Data Source Type group: "Longhorn Storage Class Settings" type: enum options: - "" - "download" - "upload" - "export-from-volume" default: "" - variable: persistence.backingImage.dataSourceParameters description: "Specify the data source parameters for the backing image used in Longhorn StorageClass. If the backing image does not exists, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image. This option accepts a json string of a map. e.g., '{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'. WARNING: - If the backing image name is not specified, setting this field is meaningless. - Be careful of the quotes here." label: Storage Class Backing Image Data Source Parameters group: "Longhorn Storage Class Settings" type: string default: - variable: persistence.removeSnapshotsDuringFilesystemTrim description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass" label: Default Storage Class Remove Snapshots During Filesystem Trim group: "Longhorn Storage Class Settings" type: enum options: - "ignored" - "enabled" - "disabled" default: "ignored" - variable: ingress.enabled default: "false" description: "Expose app using Layer 7 Load Balancer - ingress" type: boolean group: "Services and Load Balancing" label: Expose app using Layer 7 Load Balancer show_subquestion_if: true subquestions: - variable: ingress.host default: "xip.io" description: "layer 7 Load Balancer hostname" type: hostname required: true label: Layer 7 Load Balancer Hostname - variable: ingress.path default: "/" description: "If ingress is enabled you can set the default ingress path" type: string required: true label: Ingress Path - variable: service.ui.type default: "Rancher-Proxy" description: "Define Longhorn UI service type" type: enum options: - "ClusterIP" - "NodePort" - "LoadBalancer" - "Rancher-Proxy" label: Longhorn UI Service show_if: "ingress.enabled=false" group: "Services and Load Balancing" show_subquestion_if: "NodePort" subquestions: - variable: service.ui.nodePort default: "" description: "NodePort port number(to set explicitly, choose port between 30000-32767)" type: int min: 30000 max: 32767 show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer" label: UI Service NodePort number - variable: enablePSP default: "false" description: "Setup a pod security policy for Longhorn workloads." label: Pod Security Policy type: boolean group: "Other Settings" - variable: global.cattle.windowsCluster.enabled default: "false" description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster." label: Rancher Windows Cluster type: boolean group: "Other Settings" - variable: networkPolicies.enabled description: "Enable NetworkPolicies to limit access to the longhorn pods. Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added." group: "Other Settings" label: Network Policies default: "false" type: boolean subquestions: - variable: networkPolicies.type label: Network Policies for Ingress description: "Create the policy to allow access for the ingress, select the distribution." show_if: "networkPolicies.enabled=true&&ingress.enabled=true" type: enum default: "rke2" options: - "rke1" - "rke2" - "k3s"