mirror of https://git.rancher.io/charts
Merge pull request #1727 from aiyengar2/forwardport_25_charts
Forwardport 2.5.12 chartspull/1701/head
commit
caea7ddb57
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,10 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/hidden: "true"
|
||||
catalog.cattle.io/namespace: longhorn-system
|
||||
catalog.cattle.io/release-name: longhorn-crd
|
||||
apiVersion: v1
|
||||
description: Installs the CRDs for longhorn.
|
||||
name: longhorn-crd
|
||||
type: application
|
||||
version: 1.1.300+up1.1.3
|
|
@ -0,0 +1,2 @@
|
|||
# longhorn-crd
|
||||
A Rancher chart that installs the CRDs used by longhorn.
|
|
@ -0,0 +1,525 @@
|
|||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: Engine
|
||||
name: engines.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Engine
|
||||
listKind: EngineList
|
||||
plural: engines
|
||||
shortNames:
|
||||
- lhe
|
||||
singular: engine
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the engine
|
||||
jsonPath: .status.currentState
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the engine is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: InstanceManager
|
||||
type: string
|
||||
description: The instance manager of the engine
|
||||
jsonPath: .status.instanceManagerName
|
||||
- name: Image
|
||||
type: string
|
||||
description: The current image of the engine
|
||||
jsonPath: .status.currentImage
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: Replica
|
||||
name: replicas.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Replica
|
||||
listKind: ReplicaList
|
||||
plural: replicas
|
||||
shortNames:
|
||||
- lhr
|
||||
singular: replica
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the replica
|
||||
jsonPath: .status.currentState
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the replica is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: Disk
|
||||
type: string
|
||||
description: The disk that the replica is on
|
||||
jsonPath: .spec.diskID
|
||||
- name: InstanceManager
|
||||
type: string
|
||||
description: The instance manager of the replica
|
||||
jsonPath: .status.instanceManagerName
|
||||
- name: Image
|
||||
type: string
|
||||
description: The current image of the replica
|
||||
jsonPath: .status.currentImage
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: Setting
|
||||
name: settings.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Setting
|
||||
listKind: SettingList
|
||||
plural: settings
|
||||
shortNames:
|
||||
- lhs
|
||||
singular: setting
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
additionalPrinterColumns:
|
||||
- name: Value
|
||||
type: string
|
||||
description: The value of the setting
|
||||
jsonPath: .value
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: Volume
|
||||
name: volumes.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Volume
|
||||
listKind: VolumeList
|
||||
plural: volumes
|
||||
shortNames:
|
||||
- lhv
|
||||
singular: volume
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the volume
|
||||
jsonPath: .status.state
|
||||
- name: Robustness
|
||||
type: string
|
||||
description: The robustness of the volume
|
||||
jsonPath: .status.robustness
|
||||
- name: Scheduled
|
||||
type: string
|
||||
description: The scheduled condition of the volume
|
||||
jsonPath: .status.conditions['scheduled']['status']
|
||||
- name: Size
|
||||
type: string
|
||||
description: The size of the volume
|
||||
jsonPath: .spec.size
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the volume is currently attaching to
|
||||
jsonPath: .status.currentNodeID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: EngineImage
|
||||
name: engineimages.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: EngineImage
|
||||
listKind: EngineImageList
|
||||
plural: engineimages
|
||||
shortNames:
|
||||
- lhei
|
||||
singular: engineimage
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: State of the engine image
|
||||
jsonPath: .status.state
|
||||
- name: Image
|
||||
type: string
|
||||
description: The Longhorn engine image
|
||||
jsonPath: .spec.image
|
||||
- name: RefCount
|
||||
type: integer
|
||||
description: Number of volumes are using the engine image
|
||||
jsonPath: .status.refCount
|
||||
- name: BuildDate
|
||||
type: date
|
||||
description: The build date of the engine image
|
||||
jsonPath: .status.buildDate
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: Node
|
||||
name: nodes.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Node
|
||||
listKind: NodeList
|
||||
plural: nodes
|
||||
shortNames:
|
||||
- lhn
|
||||
singular: node
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Ready
|
||||
type: string
|
||||
description: Indicate whether the node is ready
|
||||
jsonPath: .status.conditions['Ready']['status']
|
||||
- name: AllowScheduling
|
||||
type: boolean
|
||||
description: Indicate whether the user disabled/enabled replica scheduling for the node
|
||||
jsonPath: .spec.allowScheduling
|
||||
- name: Schedulable
|
||||
type: string
|
||||
description: Indicate whether Longhorn can schedule replicas on the node
|
||||
jsonPath: .status.conditions['Schedulable']['status']
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: InstanceManager
|
||||
name: instancemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: InstanceManager
|
||||
listKind: InstanceManagerList
|
||||
plural: instancemanagers
|
||||
shortNames:
|
||||
- lhim
|
||||
singular: instancemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the instance manager
|
||||
jsonPath: .status.currentState
|
||||
- name: Type
|
||||
type: string
|
||||
description: The type of the instance manager (engine or replica)
|
||||
jsonPath: .spec.type
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the instance manager is running on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: ShareManager
|
||||
name: sharemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: ShareManager
|
||||
listKind: ShareManagerList
|
||||
plural: sharemanagers
|
||||
shortNames:
|
||||
- lhsm
|
||||
singular: sharemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the share manager
|
||||
jsonPath: .status.state
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the share manager is owned by
|
||||
jsonPath: .status.ownerID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: BackingImage
|
||||
name: backingimages.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: BackingImage
|
||||
listKind: BackingImageList
|
||||
plural: backingimages
|
||||
shortNames:
|
||||
- lhbi
|
||||
singular: backingimage
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Image
|
||||
type: string
|
||||
description: The backing image name
|
||||
jsonPath: .spec.image
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.1.3
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.1.3
|
||||
longhorn-manager: BackingImageManager
|
||||
name: backingimagemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: BackingImageManager
|
||||
listKind: BackingImageManagerList
|
||||
plural: backingimagemanagers
|
||||
shortNames:
|
||||
- lhbim
|
||||
singular: backingimagemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the manager
|
||||
jsonPath: .status.currentState
|
||||
- name: Image
|
||||
type: string
|
||||
description: The image the manager pod will use
|
||||
jsonPath: .spec.image
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node the manager is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: DiskUUID
|
||||
type: string
|
||||
description: The disk the manager is responsible for
|
||||
jsonPath: .spec.diskUUID
|
||||
- name: DiskPath
|
||||
type: string
|
||||
description: The disk path the manager is using
|
||||
jsonPath: .spec.diskPath
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
|
@ -0,0 +1,10 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/hidden: "true"
|
||||
catalog.cattle.io/namespace: longhorn-system
|
||||
catalog.cattle.io/release-name: longhorn-crd
|
||||
apiVersion: v1
|
||||
description: Installs the CRDs for longhorn.
|
||||
name: longhorn-crd
|
||||
type: application
|
||||
version: 1.2.300+up1.2.3
|
|
@ -0,0 +1,2 @@
|
|||
# longhorn-crd
|
||||
A Rancher chart that installs the CRDs used by longhorn.
|
|
@ -0,0 +1,832 @@
|
|||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: Engine
|
||||
name: engines.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Engine
|
||||
listKind: EngineList
|
||||
plural: engines
|
||||
shortNames:
|
||||
- lhe
|
||||
singular: engine
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the engine
|
||||
jsonPath: .status.currentState
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the engine is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: InstanceManager
|
||||
type: string
|
||||
description: The instance manager of the engine
|
||||
jsonPath: .status.instanceManagerName
|
||||
- name: Image
|
||||
type: string
|
||||
description: The current image of the engine
|
||||
jsonPath: .status.currentImage
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: Replica
|
||||
name: replicas.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Replica
|
||||
listKind: ReplicaList
|
||||
plural: replicas
|
||||
shortNames:
|
||||
- lhr
|
||||
singular: replica
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the replica
|
||||
jsonPath: .status.currentState
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the replica is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: Disk
|
||||
type: string
|
||||
description: The disk that the replica is on
|
||||
jsonPath: .spec.diskID
|
||||
- name: InstanceManager
|
||||
type: string
|
||||
description: The instance manager of the replica
|
||||
jsonPath: .status.instanceManagerName
|
||||
- name: Image
|
||||
type: string
|
||||
description: The current image of the replica
|
||||
jsonPath: .status.currentImage
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: Setting
|
||||
name: settings.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Setting
|
||||
listKind: SettingList
|
||||
plural: settings
|
||||
shortNames:
|
||||
- lhs
|
||||
singular: setting
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
additionalPrinterColumns:
|
||||
- name: Value
|
||||
type: string
|
||||
description: The value of the setting
|
||||
jsonPath: .value
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: Volume
|
||||
name: volumes.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Volume
|
||||
listKind: VolumeList
|
||||
plural: volumes
|
||||
shortNames:
|
||||
- lhv
|
||||
singular: volume
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the volume
|
||||
jsonPath: .status.state
|
||||
- name: Robustness
|
||||
type: string
|
||||
description: The robustness of the volume
|
||||
jsonPath: .status.robustness
|
||||
- name: Scheduled
|
||||
type: string
|
||||
description: The scheduled condition of the volume
|
||||
jsonPath: .status.conditions['scheduled']['status']
|
||||
- name: Size
|
||||
type: string
|
||||
description: The size of the volume
|
||||
jsonPath: .spec.size
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the volume is currently attaching to
|
||||
jsonPath: .status.currentNodeID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: EngineImage
|
||||
name: engineimages.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: EngineImage
|
||||
listKind: EngineImageList
|
||||
plural: engineimages
|
||||
shortNames:
|
||||
- lhei
|
||||
singular: engineimage
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: State of the engine image
|
||||
jsonPath: .status.state
|
||||
- name: Image
|
||||
type: string
|
||||
description: The Longhorn engine image
|
||||
jsonPath: .spec.image
|
||||
- name: RefCount
|
||||
type: integer
|
||||
description: Number of volumes are using the engine image
|
||||
jsonPath: .status.refCount
|
||||
- name: BuildDate
|
||||
type: date
|
||||
description: The build date of the engine image
|
||||
jsonPath: .status.buildDate
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: Node
|
||||
name: nodes.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: Node
|
||||
listKind: NodeList
|
||||
plural: nodes
|
||||
shortNames:
|
||||
- lhn
|
||||
singular: node
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Ready
|
||||
type: string
|
||||
description: Indicate whether the node is ready
|
||||
jsonPath: .status.conditions['Ready']['status']
|
||||
- name: AllowScheduling
|
||||
type: boolean
|
||||
description: Indicate whether the user disabled/enabled replica scheduling for the node
|
||||
jsonPath: .spec.allowScheduling
|
||||
- name: Schedulable
|
||||
type: string
|
||||
description: Indicate whether Longhorn can schedule replicas on the node
|
||||
jsonPath: .status.conditions['Schedulable']['status']
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: InstanceManager
|
||||
name: instancemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: InstanceManager
|
||||
listKind: InstanceManagerList
|
||||
plural: instancemanagers
|
||||
shortNames:
|
||||
- lhim
|
||||
singular: instancemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the instance manager
|
||||
jsonPath: .status.currentState
|
||||
- name: Type
|
||||
type: string
|
||||
description: The type of the instance manager (engine or replica)
|
||||
jsonPath: .spec.type
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the instance manager is running on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: ShareManager
|
||||
name: sharemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: ShareManager
|
||||
listKind: ShareManagerList
|
||||
plural: sharemanagers
|
||||
shortNames:
|
||||
- lhsm
|
||||
singular: sharemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The state of the share manager
|
||||
jsonPath: .status.state
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node that the share manager is owned by
|
||||
jsonPath: .status.ownerID
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: BackingImage
|
||||
name: backingimages.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: BackingImage
|
||||
listKind: BackingImageList
|
||||
plural: backingimages
|
||||
shortNames:
|
||||
- lhbi
|
||||
singular: backingimage
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Image
|
||||
type: string
|
||||
description: The backing image name
|
||||
jsonPath: .spec.image
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
helm.sh/chart: longhorn-1.2.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.2.0
|
||||
longhorn-manager: BackingImageManager
|
||||
name: backingimagemanagers.longhorn.io
|
||||
spec:
|
||||
group: longhorn.io
|
||||
names:
|
||||
kind: BackingImageManager
|
||||
listKind: BackingImageManagerList
|
||||
plural: backingimagemanagers
|
||||
shortNames:
|
||||
- lhbim
|
||||
singular: backingimagemanager
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
status:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: State
|
||||
type: string
|
||||
description: The current state of the manager
|
||||
jsonPath: .status.currentState
|
||||
- name: Image
|
||||
type: string
|
||||
description: The image the manager pod will use
|
||||
jsonPath: .spec.image
|
||||
- name: Node
|
||||
type: string
|
||||
description: The node the manager is on
|
||||
jsonPath: .spec.nodeID
|
||||
- name: DiskUUID
|
||||
type: string
|
||||
description: The disk the manager is responsible for
|
||||
jsonPath: .spec.diskUUID
|
||||
- name: DiskPath
|
||||
type: string
|
||||
description: The disk path the manager is using
|
||||
jsonPath: .spec.diskPath
|
||||
- name: Age
|
||||
type: date
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
---
# CustomResourceDefinition for Longhorn BackingImageDataSource: tracks the pod
# that prepares a backing image file from a source (download/upload/volume).
# Labels aligned with the chart version (1.1.3); the copied manifest carried
# stale longhorn-1.2.0 labels.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.3
    longhorn-manager: BackingImageDataSource
  name: backingimagedatasources.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: BackingImageDataSource
    listKind: BackingImageDataSourceList
    plural: backingimagedatasources
    shortNames:
    - lhbids
    singular: backingimagedatasource
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: State
      type: string
      # Fixed typo: "provisione" -> "provision".
      description: The current state of the pod used to provision the backing image file from source
      jsonPath: .status.currentState
    - name: SourceType
      type: string
      description: The data source type
      jsonPath: .spec.sourceType
    - name: Node
      type: string
      description: The node the backing image file will be prepared on
      jsonPath: .spec.nodeID
    - name: DiskUUID
      type: string
      description: The disk the backing image file will be prepared on
      jsonPath: .spec.diskUUID
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
|
||||
---
# CustomResourceDefinition for Longhorn BackupTarget: the backupstore endpoint
# (NFS or S3) and its polling/credential configuration.
# Labels aligned with the chart version (1.1.3); the copied manifest carried
# stale longhorn-1.2.0 labels.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.3
    longhorn-manager: BackupTarget
  name: backuptargets.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: BackupTarget
    listKind: BackupTargetList
    plural: backuptargets
    shortNames:
    - lhbt
    singular: backuptarget
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: URL
      type: string
      description: The backup target URL
      jsonPath: .spec.backupTargetURL
    - name: Credential
      type: string
      description: The backup target credential secret
      jsonPath: .spec.credentialSecret
    - name: Interval
      type: string
      description: The backup target poll interval
      jsonPath: .spec.pollInterval
    - name: Available
      type: boolean
      description: Indicate whether the backup target is available or not
      jsonPath: .status.available
    - name: LastSyncedAt
      type: string
      description: The backup target last synced time
      jsonPath: .status.lastSyncedAt
|
||||
---
# CustomResourceDefinition for Longhorn BackupVolume: per-volume backup
# bookkeeping synced from the backupstore.
# Labels aligned with the chart version (1.1.3); the copied manifest carried
# stale longhorn-1.2.0 labels.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.3
    longhorn-manager: BackupVolume
  name: backupvolumes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: BackupVolume
    listKind: BackupVolumeList
    plural: backupvolumes
    shortNames:
    - lhbv
    singular: backupvolume
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: CreatedAt
      type: string
      description: The backup volume creation time
      jsonPath: .status.createdAt
    - name: LastBackupName
      type: string
      description: The backup volume last backup name
      jsonPath: .status.lastBackupName
    - name: LastBackupAt
      type: string
      description: The backup volume last backup time
      jsonPath: .status.lastBackupAt
    - name: LastSyncedAt
      type: string
      description: The backup volume last synced time
      jsonPath: .status.lastSyncedAt
|
||||
---
# CustomResourceDefinition for Longhorn Backup: a single snapshot backup in
# the backupstore.
# Labels aligned with the chart version (1.1.3); the copied manifest carried
# stale longhorn-1.2.0 labels.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.3
    longhorn-manager: Backup
  name: backups.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Backup
    listKind: BackupList
    plural: backups
    shortNames:
    - lhb
    singular: backup
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: SnapshotName
      type: string
      description: The snapshot name
      jsonPath: .status.snapshotName
    - name: SnapshotSize
      type: string
      description: The snapshot size
      jsonPath: .status.size
    - name: SnapshotCreatedAt
      type: string
      description: The snapshot creation time
      jsonPath: .status.snapshotCreatedAt
    - name: State
      type: string
      description: The backup state
      jsonPath: .status.state
    - name: LastSyncedAt
      type: string
      description: The backup last synced time
      jsonPath: .status.lastSyncedAt
|
||||
---
# CustomResourceDefinition for Longhorn RecurringJob: scheduled recurring
# snapshot/backup jobs for volumes.
# Labels aligned with the chart version (1.1.3); the copied manifest carried
# stale longhorn-1.2.0 labels.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app.kubernetes.io/name: longhorn
    helm.sh/chart: longhorn-1.1.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: longhorn
    app.kubernetes.io/version: v1.1.3
    longhorn-manager: RecurringJob
  name: recurringjobs.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: RecurringJob
    listKind: RecurringJobList
    plural: recurringjobs
    shortNames:
    - lhrj
    singular: recurringjob
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          metadata:
            type: object
            properties:
              name:
                type: string
          spec:
            type: object
            properties:
              groups:
                type: array
                items:
                  type: string
              task:
                type: string
                # Fixed: "^snapshot|backup$" anchored each alternative
                # separately, so values like "snapshotX" or "mybackup"
                # passed validation. Group the alternation so only the
                # exact strings "snapshot" or "backup" are accepted.
                pattern: "^(snapshot|backup)$"
              cron:
                type: string
              retain:
                type: integer
              concurrency:
                type: integer
              labels:
                x-kubernetes-preserve-unknown-fields: true
          status:
            x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: Groups
      type: string
      description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume.
      jsonPath: .spec.groups
    - name: Task
      type: string
      description: Should be one of "backup" or "snapshot".
      jsonPath: .spec.task
    - name: Cron
      type: string
      description: The cron expression represents recurring job scheduling.
      jsonPath: .spec.cron
    - name: Retain
      type: integer
      description: The number of snapshots/backups to keep for the volume.
      jsonPath: .spec.retain
    - name: Concurrency
      type: integer
      description: The concurrent job to run by each cron job.
      jsonPath: .spec.concurrency
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
    - name: Labels
      type: string
      description: Specify the labels
      jsonPath: .spec.labels
|
|
@ -0,0 +1,21 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
|
@ -0,0 +1,41 @@
|
|||
# Chart.yaml for the longhorn chart (Rancher catalog annotations included).
# Fixed: the sources list contained https://github.com/longhorn/backing-image-manager
# twice; the duplicate entry has been removed.
annotations:
  catalog.cattle.io/auto-install: longhorn-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Longhorn
  catalog.cattle.io/kube-version: '>= v1.16.0-0, < v1.22.0-0'
  catalog.cattle.io/namespace: longhorn-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
  catalog.cattle.io/rancher-version: <2.5.99-0
  catalog.cattle.io/release-name: longhorn
  catalog.cattle.io/ui-component: longhorn
  catalog.cattle.io/upstream-version: 1.1.3
apiVersion: v1
appVersion: v1.1.3
description: Longhorn is a distributed block storage system for Kubernetes.
home: https://github.com/longhorn/longhorn
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
keywords:
- longhorn
- storage
- distributed
- block
- device
- iscsi
- nfs
kubeVersion: '>= v1.16.0-0, < v1.22.0-0'
maintainers:
- email: maintainers@longhorn.io
  name: Longhorn maintainers
name: longhorn
sources:
- https://github.com/longhorn/longhorn
- https://github.com/longhorn/longhorn-engine
- https://github.com/longhorn/longhorn-instance-manager
- https://github.com/longhorn/longhorn-share-manager
- https://github.com/longhorn/backing-image-manager
- https://github.com/longhorn/longhorn-manager
- https://github.com/longhorn/longhorn-ui
- https://github.com/longhorn/longhorn-tests
version: 1.1.300+up1.1.3
|
|
@ -0,0 +1,33 @@
|
|||
# Longhorn Chart
|
||||
|
||||
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
|
||||
|
||||
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
|
||||
|
||||
## Source Code
|
||||
|
||||
Longhorn is 100% open source software. Project source code is spread across a number of repos:
|
||||
|
||||
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
|
||||
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
|
||||
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
|
||||
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
|
||||
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
|
||||
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
|
||||
2. Kubernetes v1.16+
|
||||
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
|
||||
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.
|
||||
|
||||
## Uninstallation
|
||||
|
||||
To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc).
|
||||
|
||||
From Rancher Cluster Explorer UI, navigate to Apps page, delete app `longhorn` then app `longhorn-crd` in Installed Apps tab.
|
||||
|
||||
|
||||
---
|
||||
Please see [link](https://github.com/longhorn/longhorn) for more information.
|
|
@ -0,0 +1,11 @@
|
|||
# Longhorn
|
||||
|
||||
Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
|
||||
|
||||
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
|
||||
|
||||
**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
|
||||
|
||||
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
|
||||
|
||||
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
|
|
@ -0,0 +1,532 @@
|
|||
categories:
|
||||
- storage
|
||||
namespace: longhorn-system
|
||||
questions:
|
||||
- variable: image.defaultImage
|
||||
default: "true"
|
||||
description: "Use default Longhorn images"
|
||||
label: Use Default Images
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Longhorn Images"
|
||||
subquestions:
|
||||
- variable: image.longhorn.manager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-manager
|
||||
description: "Specify Longhorn Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.manager.tag
|
||||
default: v1.1.3
|
||||
description: "Specify Longhorn Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.engine.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-engine
|
||||
description: "Specify Longhorn Engine Image Repository"
|
||||
type: string
|
||||
label: Longhorn Engine Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.engine.tag
|
||||
default: v1.1.3
|
||||
description: "Specify Longhorn Engine Image Tag"
|
||||
type: string
|
||||
label: Longhorn Engine Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.ui.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-ui
|
||||
description: "Specify Longhorn UI Image Repository"
|
||||
type: string
|
||||
label: Longhorn UI Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.ui.tag
|
||||
default: v1.1.3
|
||||
description: "Specify Longhorn UI Image Tag"
|
||||
type: string
|
||||
label: Longhorn UI Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.instanceManager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-instance-manager
|
||||
description: "Specify Longhorn Instance Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Instance Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.instanceManager.tag
|
||||
default: v1_20211210
|
||||
description: "Specify Longhorn Instance Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Instance Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.shareManager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-share-manager
|
||||
description: "Specify Longhorn Share Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Share Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.shareManager.tag
|
||||
default: v1_20210416_patch1
|
||||
description: "Specify Longhorn Share Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Share Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.backingImageManager.repository
|
||||
default: rancher/mirrored-longhornio-backing-image-manager
|
||||
description: "Specify Longhorn Backing Image Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Backing Image Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.backingImageManager.tag
|
||||
default: v1_20210422_patch1
|
||||
description: "Specify Longhorn Backing Image Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Backing Image Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.csi.attacher.repository
|
||||
default: rancher/mirrored-longhornio-csi-attacher
|
||||
description: "Specify CSI attacher image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Attacher Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.attacher.tag
|
||||
default: v2.2.1-lh2
|
||||
description: "Specify CSI attacher image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Attacher Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.provisioner.repository
|
||||
default: rancher/mirrored-longhornio-csi-provisioner
|
||||
description: "Specify CSI provisioner image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Provisioner Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.provisioner.tag
|
||||
default: v1.6.0-lh2
|
||||
description: "Specify CSI provisioner image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Provisioner Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.nodeDriverRegistrar.repository
|
||||
default: rancher/mirrored-longhornio-csi-node-driver-registrar
|
||||
description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Node Driver Registrar Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.nodeDriverRegistrar.tag
|
||||
default: v1.2.0-lh1
|
||||
description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Node Driver Registrar Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.resizer.repository
|
||||
default: rancher/mirrored-longhornio-csi-resizer
|
||||
description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Resizer Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.resizer.tag
|
||||
default: v0.5.1-lh2
|
||||
description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Resizer Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.snapshotter.repository
|
||||
default: rancher/mirrored-longhornio-csi-snapshotter
|
||||
description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Snapshotter Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.snapshotter.tag
|
||||
default: v2.1.1-lh2
|
||||
description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Snapshotter Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: privateRegistry.registryUrl
|
||||
label: Private registry URL
|
||||
description: "URL of private registry. Leave blank to apply system default registry."
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: privateRegistry.registryUser
|
||||
label: Private registry user
|
||||
description: "User used to authenticate to private registry"
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: privateRegistry.registryPasswd
|
||||
label: Private registry password
|
||||
description: "Password used to authenticate to private registry"
|
||||
group: "Private Registry Settings"
|
||||
type: password
|
||||
default: ""
|
||||
- variable: privateRegistry.registrySecret
|
||||
label: Private registry secret name
|
||||
description: "Longhorn will automatically generate a Kubernetes secret with this name and use it to pull images from your private registry."
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: longhorn.default_setting
|
||||
default: "false"
|
||||
description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
|
||||
label: "Customize Default Settings"
|
||||
type: boolean
|
||||
show_subquestion_if: true
|
||||
group: "Longhorn Default Settings"
|
||||
subquestions:
|
||||
- variable: csi.kubeletRootDir
|
||||
default:
|
||||
description: "Specify kubelet root-dir. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Kubelet Root Directory
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.attacherReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Attacher. By default 3."
|
||||
label: Longhorn CSI Attacher replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.provisionerReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Provisioner. By default 3."
|
||||
label: Longhorn CSI Provisioner replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.resizerReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Resizer. By default 3."
|
||||
label: Longhorn CSI Resizer replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.snapshotterReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Snapshotter. By default 3."
|
||||
label: Longhorn CSI Snapshotter replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: defaultSettings.backupTarget
|
||||
label: Backup Target
|
||||
description: "The endpoint used to access the backupstore. NFS and S3 are supported."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: defaultSettings.backupTargetCredentialSecret
|
||||
label: Backup Target Credential Secret
|
||||
description: "The name of the Kubernetes secret associated with the backup target."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
|
||||
label: Allow Recurring Job While Volume Is Detached
|
||||
description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup.
|
||||
Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.createDefaultDiskLabeledNodes
|
||||
label: Create Default Disk on Labeled Nodes
|
||||
description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.defaultDataPath
|
||||
label: Default Data Path
|
||||
description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "/var/lib/longhorn/"
|
||||
- variable: defaultSettings.defaultDataLocality
|
||||
label: Default Data Locality
|
||||
description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
|
||||
This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
|
||||
The available modes are:
|
||||
- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
|
||||
- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "disabled"
|
||||
- "best-effort"
|
||||
default: "disabled"
|
||||
- variable: defaultSettings.replicaSoftAntiAffinity
|
||||
label: Replica Node Level Soft Anti-Affinity
|
||||
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.storageOverProvisioningPercentage
|
||||
label: Storage Over Provisioning Percentage
|
||||
description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 200
|
||||
- variable: defaultSettings.storageMinimalAvailablePercentage
|
||||
label: Storage Minimal Available Percentage
|
||||
description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 100
|
||||
default: 25
|
||||
- variable: defaultSettings.upgradeChecker
|
||||
label: Enable Upgrade Checker
|
||||
description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.defaultReplicaCount
|
||||
label: Default Replica Count
|
||||
description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 1
|
||||
max: 20
|
||||
default: 3
|
||||
- variable: defaultSettings.defaultLonghornStaticStorageClass
|
||||
label: Default Longhorn Static StorageClass Name
|
||||
description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "longhorn-static"
|
||||
- variable: defaultSettings.backupstorePollInterval
|
||||
label: Backupstore Poll Interval
|
||||
description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 300
|
||||
- variable: defaultSettings.autoSalvage
|
||||
label: Automatic salvage
|
||||
description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
|
||||
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
|
||||
description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
|
||||
If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
|
||||
**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.disableSchedulingOnCordonedNode
|
||||
label: Disable Scheduling On Cordoned Node
|
||||
description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.replicaZoneSoftAntiAffinity
|
||||
label: Replica Zone Level Soft Anti-Affinity
|
||||
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.nodeDownPodDeletionPolicy
|
||||
label: Pod Deletion Policy When Node is Down
|
||||
description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
|
||||
- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
|
||||
- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "do-nothing"
|
||||
- "delete-statefulset-pod"
|
||||
- "delete-deployment-pod"
|
||||
- "delete-both-statefulset-and-deployment-pod"
|
||||
default: "do-nothing"
|
||||
- variable: defaultSettings.allowNodeDrainWithLastHealthyReplica
|
||||
label: Allow Node Drain with the Last Healthy Replica
|
||||
description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.
|
||||
If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.mkfsExt4Parameters
|
||||
label: Custom mkfs.ext4 parameters
|
||||
description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
- variable: defaultSettings.disableReplicaRebuild
|
||||
label: Disable Replica Rebuild
|
||||
description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.replicaReplenishmentWaitInterval
|
||||
label: Replica Replenishment Wait Interval
|
||||
description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
|
||||
Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 600
|
||||
- variable: defaultSettings.disableRevisionCounter
|
||||
label: Disable Revision Counter
|
||||
description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.systemManagedPodsImagePullPolicy
|
||||
label: System Managed Pod Image Pull Policy
|
||||
description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "if-not-present"
|
||||
- "always"
|
||||
- "never"
|
||||
default: "if-not-present"
|
||||
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
|
||||
label: Allow Volume Creation with Degraded Availability
|
||||
description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
|
||||
label: Automatically Cleanup System Generated Snapshot
|
||||
description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
|
||||
label: Concurrent Automatic Engine Upgrade Per Node Limit
|
||||
description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 0
|
||||
- variable: defaultSettings.backingImageCleanupWaitInterval
|
||||
label: Backing Image Cleanup Wait Interval
|
||||
description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 60
|
||||
- variable: defaultSettings.guaranteedEngineManagerCPU
|
||||
label: Guaranteed Engine Manager CPU
|
||||
description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload.
|
||||
In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
|
||||
Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100.
|
||||
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
|
||||
If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
|
||||
WARNING:
|
||||
- Value 0 means unsetting CPU requests for engine manager pods.
|
||||
- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40.
|
||||
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
|
||||
- This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set.
|
||||
- After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 40
|
||||
default: 12
|
||||
- variable: defaultSettings.guaranteedReplicaManagerCPU
|
||||
label: Guaranteed Replica Manager CPU
|
||||
description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload.
|
||||
In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
|
||||
Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100.
|
||||
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
|
||||
If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
|
||||
WARNING:
|
||||
- Value 0 means unsetting CPU requests for replica manager pods.
|
||||
- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40.
|
||||
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
|
||||
- This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set.
|
||||
- After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 40
|
||||
default: 12
|
||||
- variable: persistence.defaultClass
|
||||
default: "true"
|
||||
description: "Set as default StorageClass for Longhorn"
|
||||
label: Default Storage Class
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: boolean
|
||||
- variable: persistence.reclaimPolicy
|
||||
label: Storage Class Retain Policy
|
||||
description: "Define reclaim policy (Retain or Delete)"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: enum
|
||||
options:
|
||||
- "Delete"
|
||||
- "Retain"
|
||||
default: "Delete"
|
||||
- variable: persistence.defaultClassReplicaCount
|
||||
description: "Set replica count for Longhorn StorageClass"
|
||||
label: Default Storage Class Replica Count
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: int
|
||||
min: 1
|
||||
max: 10
|
||||
default: 3
|
||||
- variable: persistence.recurringJobs.enable
|
||||
description: "Enable recurring job for Longhorn StorageClass"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
label: Enable Storage Class Recurring Job
|
||||
type: boolean
|
||||
default: false
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: persistence.recurringJobs.jobList
|
||||
description: 'Recurring job list for Longhorn StorageClass. Please be careful of quotes of input. e.g., [{"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,"labels": {"interval":"2m"}}]'
|
||||
label: Storage Class Recurring Job List
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: ingress.enabled
|
||||
default: "false"
|
||||
description: "Expose app using Layer 7 Load Balancer - ingress"
|
||||
type: boolean
|
||||
group: "Services and Load Balancing"
|
||||
label: Expose app using Layer 7 Load Balancer
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: ingress.host
|
||||
default: "xip.io"
|
||||
description: "layer 7 Load Balancer hostname"
|
||||
type: hostname
|
||||
required: true
|
||||
label: Layer 7 Load Balancer Hostname
|
||||
- variable: service.ui.type
|
||||
default: "Rancher-Proxy"
|
||||
description: "Define Longhorn UI service type"
|
||||
type: enum
|
||||
options:
|
||||
- "ClusterIP"
|
||||
- "NodePort"
|
||||
- "LoadBalancer"
|
||||
- "Rancher-Proxy"
|
||||
label: Longhorn UI Service
|
||||
show_if: "ingress.enabled=false"
|
||||
group: "Services and Load Balancing"
|
||||
show_subquestion_if: "NodePort"
|
||||
subquestions:
|
||||
- variable: service.ui.nodePort
|
||||
default: ""
|
||||
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
|
||||
type: int
|
||||
min: 30000
|
||||
max: 32767
|
||||
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
|
||||
label: UI Service NodePort number
|
||||
- variable: enablePSP
|
||||
default: "true"
|
||||
description: "Setup a pod security policy for Longhorn workloads."
|
||||
label: Pod Security Policy
|
||||
type: boolean
|
||||
group: "Other Settings"
|
|
@ -0,0 +1,5 @@
|
|||
Longhorn is now installed on the cluster!
|
||||
|
||||
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
|
||||
|
||||
Visit our documentation at https://longhorn.io/docs/
|
|
@ -0,0 +1,66 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "longhorn.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "longhorn.fullname" -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "longhorn.managerIP" -}}
|
||||
{{- $fullname := (include "longhorn.fullname" .) -}}
|
||||
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "secret" }}
|
||||
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
|
||||
{{- end }}
|
||||
|
||||
{{- /*
|
||||
longhorn.labels generates the standard Helm labels.
|
||||
*/ -}}
|
||||
{{- define "longhorn.labels" -}}
|
||||
app.kubernetes.io/name: {{ template "longhorn.name" . }}
|
||||
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- else -}}
|
||||
{{- "" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "registry_url" -}}
|
||||
{{- if .Values.privateRegistry.registryUrl -}}
|
||||
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
|
||||
{{- else -}}
|
||||
{{ include "system_default_registry" . }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- /*
|
||||
define the longhorn release namespace
|
||||
*/ -}}
|
||||
{{- define "release_namespace" -}}
|
||||
{{- if .Values.namespaceOverride -}}
|
||||
{{- .Values.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: longhorn-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- "*"
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets", "statefulsets", "deployments"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["watch", "list"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["longhorn.io"]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: longhorn-bind
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: longhorn-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
|
@ -0,0 +1,125 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-manager
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-manager
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-manager
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- daemon
|
||||
- --engine-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
|
||||
- --instance-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
|
||||
- --share-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
|
||||
- --backing-image-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --service-account
|
||||
- longhorn-service-account
|
||||
ports:
|
||||
- containerPort: 9500
|
||||
name: manager
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 9500
|
||||
volumeMounts:
|
||||
- name: dev
|
||||
mountPath: /host/dev/
|
||||
- name: proc
|
||||
mountPath: /host/proc/
|
||||
- name: longhorn
|
||||
mountPath: /var/lib/longhorn/
|
||||
mountPropagation: Bidirectional
|
||||
- name: longhorn-default-setting
|
||||
mountPath: /var/lib/longhorn-setting/
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: DEFAULT_SETTING_PATH
|
||||
value: /var/lib/longhorn-setting/default-setting.yaml
|
||||
volumes:
|
||||
- name: dev
|
||||
hostPath:
|
||||
path: /dev/
|
||||
- name: proc
|
||||
hostPath:
|
||||
path: /proc/
|
||||
- name: longhorn
|
||||
hostPath:
|
||||
path: /var/lib/longhorn/
|
||||
- name: longhorn-default-setting
|
||||
configMap:
|
||||
name: longhorn-default-setting
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: "100%"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-backend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.service.manager.type }}
|
||||
sessionAffinity: ClientIP
|
||||
selector:
|
||||
app: longhorn-manager
|
||||
ports:
|
||||
- name: manager
|
||||
port: 9500
|
||||
targetPort: manager
|
||||
{{- if .Values.service.manager.nodePort }}
|
||||
nodePort: {{ .Values.service.manager.nodePort }}
|
||||
{{- end }}
|
|
@ -0,0 +1,41 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-default-setting
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
default-setting.yaml: |-
|
||||
backup-target: {{ .Values.defaultSettings.backupTarget }}
|
||||
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
|
||||
allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}
|
||||
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
|
||||
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
|
||||
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
|
||||
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
|
||||
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
|
||||
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
|
||||
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
|
||||
default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}
|
||||
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
|
||||
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
|
||||
taint-toleration: {{ .Values.defaultSettings.taintToleration }}
|
||||
system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }}
|
||||
priority-class: {{ .Values.defaultSettings.priorityClass }}
|
||||
auto-salvage: {{ .Values.defaultSettings.autoSalvage }}
|
||||
auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}
|
||||
disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}
|
||||
replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}
|
||||
node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}
|
||||
allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }}
|
||||
mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }}
|
||||
disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }}
|
||||
replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}
|
||||
disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}
|
||||
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}
|
||||
allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}
|
||||
auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}
|
||||
concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}
|
||||
backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}
|
||||
guaranteed-engine-manager-cpu: {{ .Values.defaultSettings.guaranteedEngineManagerCPU }}
|
||||
guaranteed-replica-manager-cpu: {{ .Values.defaultSettings.guaranteedReplicaManagerCPU }}
|
|
@ -0,0 +1,104 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: longhorn-driver-deployer
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-driver-deployer
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-driver-deployer
|
||||
spec:
|
||||
initContainers:
|
||||
- name: wait-longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
|
||||
containers:
|
||||
- name: longhorn-driver-deployer
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- deploy-driver
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --manager-url
|
||||
- http://longhorn-backend:9500/v1
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: SERVICE_ACCOUNT
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.serviceAccountName
|
||||
{{- if .Values.csi.kubeletRootDir }}
|
||||
- name: KUBELET_ROOT_DIR
|
||||
value: {{ .Values.csi.kubeletRootDir }}
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
|
||||
- name: CSI_ATTACHER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
|
||||
- name: CSI_PROVISIONER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
|
||||
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
|
||||
- name: CSI_RESIZER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
|
||||
- name: CSI_SNAPSHOTTER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
|
||||
{{- end }}
|
||||
{{- if .Values.csi.attacherReplicaCount }}
|
||||
- name: CSI_ATTACHER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.attacherReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.provisionerReplicaCount }}
|
||||
- name: CSI_PROVISIONER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.provisionerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.resizerReplicaCount }}
|
||||
- name: CSI_RESIZER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.resizerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.snapshotterReplicaCount }}
|
||||
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
securityContext:
|
||||
runAsUser: 0
|
|
@ -0,0 +1,72 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
name: longhorn-ui
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-ui
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-ui
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-ui
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
env:
|
||||
- name: LONGHORN_MANAGER_IP
|
||||
value: "http://longhorn-backend:9500"
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornUI.priorityClass | quote}}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
kubernetes.io/cluster-service: "true"
|
||||
{{- end }}
|
||||
name: longhorn-frontend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
type: ClusterIP
|
||||
{{- else }}
|
||||
type: {{ .Values.service.ui.type }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: longhorn-ui
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: http
|
||||
{{- if .Values.service.ui.nodePort }}
|
||||
nodePort: {{ .Values.service.ui.nodePort }}
|
||||
{{- else }}
|
||||
nodePort: null
|
||||
{{- end }}
|
|
@ -0,0 +1,34 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: longhorn-ingress
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ingress
|
||||
annotations:
|
||||
{{- if .Values.ingress.tls }}
|
||||
ingress.kubernetes.io/secure-backends: "true"
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.ingress.annotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.ingressClassName }}
|
||||
ingressClassName: {{ .Values.ingress.ingressClassName }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- host: {{ .Values.ingress.host }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ default "" .Values.ingress.path }}
|
||||
backend:
|
||||
serviceName: longhorn-frontend
|
||||
servicePort: 80
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
- hosts:
|
||||
- {{ .Values.ingress.host }}
|
||||
secretName: {{ .Values.ingress.tlsSecret }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
|
||||
name: longhorn-post-upgrade
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-post-upgrade
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-post-upgrade
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- post-upgrade
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,66 @@
|
|||
{{- if .Values.enablePSP }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: longhorn-psp
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: true
|
||||
allowPrivilegeEscalation: true
|
||||
requiredDropCapabilities:
|
||||
- NET_RAW
|
||||
allowedCapabilities:
|
||||
- SYS_ADMIN
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: true
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- configMap
|
||||
- downwardAPI
|
||||
- emptyDir
|
||||
- secret
|
||||
- projected
|
||||
- hostPath
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: longhorn-psp-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
resourceNames:
|
||||
- longhorn-psp
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: longhorn-psp-binding
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: longhorn-psp-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,11 @@
|
|||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Values.privateRegistry.registrySecret }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
data:
|
||||
.dockerconfigjson: {{ template "secret" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
name: longhorn-engine-manager
|
||||
namespace: longhorn-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
longhorn.io/component: instance-manager
|
||||
longhorn.io/instance-manager-type: engine
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
name: longhorn-replica-manager
|
||||
namespace: longhorn-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
longhorn.io/component: instance-manager
|
||||
longhorn.io/instance-manager-type: replica
|
|
@ -0,0 +1,26 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-storageclass
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
storageclass.yaml: |
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: longhorn
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
|
||||
provisioner: driver.longhorn.io
|
||||
allowVolumeExpansion: true
|
||||
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
|
||||
volumeBindingMode: Immediate
|
||||
parameters:
|
||||
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
|
||||
staleReplicaTimeout: "30"
|
||||
fromBackup: ""
|
||||
baseImage: ""
|
||||
{{- if .Values.persistence.recurringJobs.enable }}
|
||||
recurringJobs: '{{ .Values.persistence.recurringJobs.jobList }}'
|
||||
{{- end }}
|
|
@ -0,0 +1,16 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
namespace: {{ include "release_namespace" $ }}
|
||||
labels: {{- include "longhorn.labels" $ | nindent 4 }}
|
||||
app: longhorn
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,49 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
name: longhorn-uninstall
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-uninstall
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-uninstall
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- uninstall
|
||||
- --force
|
||||
env:
|
||||
- name: LONGHORN_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-admin"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-edit"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-view"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"]
|
||||
verbs: [ "get", "list", "watch" ]
|
|
@ -0,0 +1,23 @@
|
|||
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
|
||||
# {{- $found := dict -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}}
|
||||
# {{- range .Capabilities.APIVersions -}}
|
||||
# {{- if hasKey $found (toString .) -}}
|
||||
# {{- set $found (toString .) true -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
# {{- range $_, $exists := $found -}}
|
||||
# {{- if (eq $exists false) -}}
|
||||
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
#{{- end -}}
|
|
@ -0,0 +1,218 @@
|
|||
# Default values for longhorn.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
global:
|
||||
cattle:
|
||||
systemDefaultRegistry: ""
|
||||
|
||||
image:
|
||||
longhorn:
|
||||
engine:
|
||||
repository: rancher/mirrored-longhornio-longhorn-engine
|
||||
tag: v1.1.3
|
||||
manager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-manager
|
||||
tag: v1.1.3
|
||||
ui:
|
||||
repository: rancher/mirrored-longhornio-longhorn-ui
|
||||
tag: v1.1.3
|
||||
instanceManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-instance-manager
|
||||
tag: v1_20211210
|
||||
shareManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-share-manager
|
||||
tag: v1_20210416_patch1
|
||||
backingImageManager:
|
||||
repository: rancher/mirrored-longhornio-backing-image-manager
|
||||
tag: v1_20210422_patch1
|
||||
csi:
|
||||
attacher:
|
||||
repository: rancher/mirrored-longhornio-csi-attacher
|
||||
tag: v2.2.1-lh2
|
||||
provisioner:
|
||||
repository: rancher/mirrored-longhornio-csi-provisioner
|
||||
tag: v1.6.0-lh2
|
||||
nodeDriverRegistrar:
|
||||
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
|
||||
tag: v1.2.0-lh1
|
||||
resizer:
|
||||
repository: rancher/mirrored-longhornio-csi-resizer
|
||||
tag: v0.5.1-lh2
|
||||
snapshotter:
|
||||
repository: rancher/mirrored-longhornio-csi-snapshotter
|
||||
tag: v2.1.1-lh2
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
ui:
|
||||
type: ClusterIP
|
||||
nodePort: null
|
||||
manager:
|
||||
type: ClusterIP
|
||||
nodePort: ""
|
||||
|
||||
persistence:
|
||||
defaultClass: true
|
||||
defaultClassReplicaCount: 3
|
||||
reclaimPolicy: Delete
|
||||
recurringJobs:
|
||||
enable: false
|
||||
jobList: []
|
||||
|
||||
csi:
|
||||
kubeletRootDir: ~
|
||||
attacherReplicaCount: ~
|
||||
provisionerReplicaCount: ~
|
||||
resizerReplicaCount: ~
|
||||
snapshotterReplicaCount: ~
|
||||
|
||||
defaultSettings:
|
||||
backupTarget: ~
|
||||
backupTargetCredentialSecret: ~
|
||||
allowRecurringJobWhileVolumeDetached: ~
|
||||
createDefaultDiskLabeledNodes: ~
|
||||
defaultDataPath: ~
|
||||
defaultDataLocality: ~
|
||||
replicaSoftAntiAffinity: ~
|
||||
storageOverProvisioningPercentage: ~
|
||||
storageMinimalAvailablePercentage: ~
|
||||
upgradeChecker: ~
|
||||
defaultReplicaCount: ~
|
||||
defaultLonghornStaticStorageClass: ~
|
||||
backupstorePollInterval: ~
|
||||
taintToleration: ~
|
||||
systemManagedComponentsNodeSelector: ~
|
||||
priorityClass: ~
|
||||
autoSalvage: ~
|
||||
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
|
||||
disableSchedulingOnCordonedNode: ~
|
||||
replicaZoneSoftAntiAffinity: ~
|
||||
nodeDownPodDeletionPolicy: ~
|
||||
allowNodeDrainWithLastHealthyReplica: ~
|
||||
mkfsExt4Parameters: ~
|
||||
disableReplicaRebuild: ~
|
||||
replicaReplenishmentWaitInterval: ~
|
||||
disableRevisionCounter: ~
|
||||
systemManagedPodsImagePullPolicy: ~
|
||||
allowVolumeCreationWithDegradedAvailability: ~
|
||||
autoCleanupSystemGeneratedSnapshot: ~
|
||||
concurrentAutomaticEngineUpgradePerNodeLimit: ~
|
||||
backingImageCleanupWaitInterval: ~
|
||||
guaranteedEngineManagerCPU: ~
|
||||
guaranteedReplicaManagerCPU: ~
|
||||
privateRegistry:
|
||||
registryUrl: ~
|
||||
registryUser: ~
|
||||
registryPasswd: ~
|
||||
registrySecret: ~
|
||||
|
||||
longhornManager:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
longhornDriver:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
longhornUI:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
#
|
||||
|
||||
ingress:
|
||||
## Set to true to enable ingress record generation
|
||||
enabled: false
|
||||
|
||||
## Add ingressClassName to the Ingress
|
||||
## Can replace the kubernetes.io/ingress.class annotation on v1.18+
|
||||
ingressClassName: ~
|
||||
|
||||
host: xip.io
|
||||
|
||||
## Set this to true in order to enable TLS on the ingress record
|
||||
## A side effect of this will be that the backend service will be connected at port 443
|
||||
tls: false
|
||||
|
||||
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
|
||||
tlsSecret: longhorn.local-tls
|
||||
|
||||
## Ingress annotations done as key:value pairs
|
||||
## If you're using kube-lego, you will want to add:
|
||||
## kubernetes.io/tls-acme: true
|
||||
##
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
|
||||
##
|
||||
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
|
||||
annotations:
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: true
|
||||
|
||||
secrets:
|
||||
## If you're providing your own certificates, please use this to add the certificates as secrets
|
||||
## key and certificate should start with -----BEGIN CERTIFICATE----- or
|
||||
## -----BEGIN RSA PRIVATE KEY-----
|
||||
##
|
||||
## name should line up with a tlsSecret set further up
|
||||
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
|
||||
##
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
# - name: longhorn.local-tls
|
||||
# key:
|
||||
# certificate:
|
||||
|
||||
# Configure a pod security policy in the Longhorn namespace to allow privileged pods
|
||||
enablePSP: true
|
||||
|
||||
## Specify override namespace, specifically this is useful for using longhorn as sub-chart
|
||||
## and its release namespace is not the `longhorn-system`
|
||||
namespaceOverride: ""
|
||||
|
||||
# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
|
||||
annotations: {}
|
|
@ -0,0 +1,21 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
|
@ -0,0 +1,40 @@
|
|||
annotations:
|
||||
catalog.cattle.io/auto-install: longhorn-crd=match
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/display-name: Longhorn
|
||||
catalog.cattle.io/kube-version: '>=1.18.0-0'
|
||||
catalog.cattle.io/namespace: longhorn-system
|
||||
catalog.cattle.io/os: linux
|
||||
catalog.cattle.io/provides-gvr: longhorn.io/v1beta1
|
||||
catalog.cattle.io/rancher-version: <2.5.99-0
|
||||
catalog.cattle.io/release-name: longhorn
|
||||
catalog.cattle.io/ui-component: longhorn
|
||||
catalog.cattle.io/upstream-version: 1.2.3
|
||||
apiVersion: v1
|
||||
appVersion: v1.2.3
|
||||
description: Longhorn is a distributed block storage system for Kubernetes.
|
||||
home: https://github.com/longhorn/longhorn
|
||||
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
|
||||
keywords:
|
||||
- longhorn
|
||||
- storage
|
||||
- distributed
|
||||
- block
|
||||
- device
|
||||
- iscsi
|
||||
- nfs
|
||||
kubeVersion: '>=1.18.0-0'
|
||||
maintainers:
|
||||
- email: maintainers@longhorn.io
|
||||
name: Longhorn maintainers
|
||||
name: longhorn
|
||||
sources:
|
||||
- https://github.com/longhorn/longhorn
|
||||
- https://github.com/longhorn/longhorn-engine
|
||||
- https://github.com/longhorn/longhorn-instance-manager
|
||||
- https://github.com/longhorn/longhorn-share-manager
|
||||
- https://github.com/longhorn/longhorn-manager
|
||||
- https://github.com/longhorn/longhorn-ui
|
||||
- https://github.com/longhorn/longhorn-tests
|
||||
- https://github.com/longhorn/backing-image-manager
|
||||
version: 1.2.300+up1.2.3
|
|
@ -0,0 +1,33 @@
|
|||
# Longhorn Chart
|
||||
|
||||
> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
|
||||
|
||||
> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
|
||||
|
||||
## Source Code
|
||||
|
||||
Longhorn is 100% open source software. Project source code is spread across a number of repos:
|
||||
|
||||
1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
|
||||
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
|
||||
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
|
||||
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
|
||||
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
|
||||
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
|
||||
2. Kubernetes v1.18+
|
||||
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster.
|
||||
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already.
|
||||
|
||||
## Uninstallation
|
||||
|
||||
To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc).
|
||||
|
||||
From Rancher Cluster Explorer UI, navigate to Apps page, delete app `longhorn` then app `longhorn-crd` in Installed Apps tab.
|
||||
|
||||
|
||||
---
|
||||
Please see [link](https://github.com/longhorn/longhorn) for more information.
|
|
@ -0,0 +1,11 @@
|
|||
# Longhorn
|
||||
|
||||
Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
|
||||
|
||||
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
|
||||
|
||||
**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
|
||||
|
||||
**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
|
||||
|
||||
[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
|
|
@ -0,0 +1,623 @@
|
|||
categories:
|
||||
- storage
|
||||
namespace: longhorn-system
|
||||
questions:
|
||||
- variable: image.defaultImage
|
||||
default: "true"
|
||||
description: "Use default Longhorn images"
|
||||
label: Use Default Images
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Longhorn Images"
|
||||
subquestions:
|
||||
- variable: image.longhorn.manager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-manager
|
||||
description: "Specify Longhorn Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.manager.tag
|
||||
default: v1.2.3
|
||||
description: "Specify Longhorn Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.engine.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-engine
|
||||
description: "Specify Longhorn Engine Image Repository"
|
||||
type: string
|
||||
label: Longhorn Engine Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.engine.tag
|
||||
default: v1.2.3
|
||||
description: "Specify Longhorn Engine Image Tag"
|
||||
type: string
|
||||
label: Longhorn Engine Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.ui.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-ui
|
||||
description: "Specify Longhorn UI Image Repository"
|
||||
type: string
|
||||
label: Longhorn UI Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.ui.tag
|
||||
default: v1.2.3
|
||||
description: "Specify Longhorn UI Image Tag"
|
||||
type: string
|
||||
label: Longhorn UI Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.instanceManager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-instance-manager
|
||||
description: "Specify Longhorn Instance Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Instance Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.instanceManager.tag
|
||||
default: v1_20211210
|
||||
description: "Specify Longhorn Instance Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Instance Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.shareManager.repository
|
||||
default: rancher/mirrored-longhornio-longhorn-share-manager
|
||||
description: "Specify Longhorn Share Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Share Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.shareManager.tag
|
||||
default: v1_20211020
|
||||
description: "Specify Longhorn Share Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Share Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.backingImageManager.repository
|
||||
default: rancher/mirrored-longhornio-backing-image-manager
|
||||
description: "Specify Longhorn Backing Image Manager Image Repository"
|
||||
type: string
|
||||
label: Longhorn Backing Image Manager Image Repository
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.longhorn.backingImageManager.tag
|
||||
default: v2_20210820
|
||||
description: "Specify Longhorn Backing Image Manager Image Tag"
|
||||
type: string
|
||||
label: Longhorn Backing Image Manager Image Tag
|
||||
group: "Longhorn Images Settings"
|
||||
- variable: image.csi.attacher.repository
|
||||
default: rancher/mirrored-longhornio-csi-attacher
|
||||
description: "Specify CSI attacher image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Attacher Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.attacher.tag
|
||||
default: v3.2.1
|
||||
description: "Specify CSI attacher image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Attacher Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.provisioner.repository
|
||||
default: rancher/mirrored-longhornio-csi-provisioner
|
||||
description: "Specify CSI provisioner image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Provisioner Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.provisioner.tag
|
||||
default: v2.1.2
|
||||
description: "Specify CSI provisioner image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Provisioner Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.nodeDriverRegistrar.repository
|
||||
default: rancher/mirrored-longhornio-csi-node-driver-registrar
|
||||
description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Node Driver Registrar Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.nodeDriverRegistrar.tag
|
||||
default: v2.3.0
|
||||
description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Node Driver Registrar Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.resizer.repository
|
||||
default: rancher/mirrored-longhornio-csi-resizer
|
||||
description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Resizer Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.resizer.tag
|
||||
default: v1.2.0
|
||||
description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Resizer Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.snapshotter.repository
|
||||
default: rancher/mirrored-longhornio-csi-snapshotter
|
||||
description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Snapshotter Image Repository
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: image.csi.snapshotter.tag
|
||||
default: v3.0.3
|
||||
description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Longhorn CSI Driver Snapshotter Image Tag
|
||||
group: "Longhorn CSI Driver Images"
|
||||
- variable: privateRegistry.registryUrl
|
||||
label: Private registry URL
|
||||
description: "URL of private registry. Leave blank to apply system default registry."
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: privateRegistry.registryUser
|
||||
label: Private registry user
|
||||
description: "User used to authenticate to private registry"
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: privateRegistry.registryPasswd
|
||||
label: Private registry password
|
||||
description: "Password used to authenticate to private registry"
|
||||
group: "Private Registry Settings"
|
||||
type: password
|
||||
default: ""
|
||||
- variable: privateRegistry.registrySecret
|
||||
label: Private registry secret name
|
||||
description: "Longhorn will automatically generate a Kubernetes secret with this name and use it to pull images from your private registry."
|
||||
group: "Private Registry Settings"
|
||||
type: string
|
||||
default: ""
|
||||
- variable: longhorn.default_setting
|
||||
default: "false"
|
||||
description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
|
||||
label: "Customize Default Settings"
|
||||
type: boolean
|
||||
show_subquestion_if: true
|
||||
group: "Longhorn Default Settings"
|
||||
subquestions:
|
||||
- variable: csi.kubeletRootDir
|
||||
default:
|
||||
description: "Specify kubelet root-dir. Leave blank to autodetect."
|
||||
type: string
|
||||
label: Kubelet Root Directory
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.attacherReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Attacher. By default 3."
|
||||
label: Longhorn CSI Attacher replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.provisionerReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Provisioner. By default 3."
|
||||
label: Longhorn CSI Provisioner replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.resizerReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Resizer. By default 3."
|
||||
label: Longhorn CSI Resizer replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: csi.snapshotterReplicaCount
|
||||
type: int
|
||||
default: 3
|
||||
min: 1
|
||||
max: 10
|
||||
description: "Specify replica count of CSI Snapshotter. By default 3."
|
||||
label: Longhorn CSI Snapshotter replica count
|
||||
group: "Longhorn CSI Driver Settings"
|
||||
- variable: defaultSettings.backupTarget
|
||||
label: Backup Target
|
||||
description: "The endpoint used to access the backupstore. NFS and S3 are supported."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: defaultSettings.backupTargetCredentialSecret
|
||||
label: Backup Target Credential Secret
|
||||
description: "The name of the Kubernetes secret associated with the backup target."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
|
||||
label: Allow Recurring Job While Volume Is Detached
|
||||
description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup.
|
||||
Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.createDefaultDiskLabeledNodes
|
||||
label: Create Default Disk on Labeled Nodes
|
||||
description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.defaultDataPath
|
||||
label: Default Data Path
|
||||
description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "/var/lib/longhorn/"
|
||||
- variable: defaultSettings.defaultDataLocality
|
||||
label: Default Data Locality
|
||||
description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
|
||||
This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
|
||||
The available modes are:
|
||||
- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
|
||||
- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "disabled"
|
||||
- "best-effort"
|
||||
default: "disabled"
|
||||
- variable: defaultSettings.replicaSoftAntiAffinity
|
||||
label: Replica Node Level Soft Anti-Affinity
|
||||
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.replicaAutoBalance
|
||||
label: Replica Auto Balance
|
||||
description: 'Enable this setting automatically rebalances replicas when discovered an available node.
|
||||
The available global options are:
|
||||
- **disabled**. This is the default option. No replica auto-balance will be done.
|
||||
- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
|
||||
- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.
|
||||
Longhorn also support individual volume setting. The setting can be specified in volume.spec.replicaAutoBalance, this overrules the global setting.
|
||||
The available volume spec options are:
|
||||
- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
|
||||
- **disabled**. This option instructs Longhorn no replica auto-balance should be done.
|
||||
- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
|
||||
- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "disabled"
|
||||
- "least-effort"
|
||||
- "best-effort"
|
||||
default: "disabled"
|
||||
- variable: defaultSettings.storageOverProvisioningPercentage
|
||||
label: Storage Over Provisioning Percentage
|
||||
description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 200
|
||||
- variable: defaultSettings.storageMinimalAvailablePercentage
|
||||
label: Storage Minimal Available Percentage
|
||||
description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 100
|
||||
default: 25
|
||||
- variable: defaultSettings.upgradeChecker
|
||||
label: Enable Upgrade Checker
|
||||
description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.defaultReplicaCount
|
||||
label: Default Replica Count
|
||||
description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 1
|
||||
max: 20
|
||||
default: 3
|
||||
- variable: defaultSettings.defaultLonghornStaticStorageClass
|
||||
label: Default Longhorn Static StorageClass Name
|
||||
description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "longhorn-static"
|
||||
- variable: defaultSettings.backupstorePollInterval
|
||||
label: Backupstore Poll Interval
|
||||
description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 300
|
||||
- variable: defaultSettings.autoSalvage
|
||||
label: Automatic salvage
|
||||
description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
|
||||
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
|
||||
description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
|
||||
If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
|
||||
**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.disableSchedulingOnCordonedNode
|
||||
label: Disable Scheduling On Cordoned Node
|
||||
description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.replicaZoneSoftAntiAffinity
|
||||
label: Replica Zone Level Soft Anti-Affinity
|
||||
description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.nodeDownPodDeletionPolicy
|
||||
label: Pod Deletion Policy When Node is Down
|
||||
description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
|
||||
- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
|
||||
- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
|
||||
- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "do-nothing"
|
||||
- "delete-statefulset-pod"
|
||||
- "delete-deployment-pod"
|
||||
- "delete-both-statefulset-and-deployment-pod"
|
||||
default: "do-nothing"
|
||||
- variable: defaultSettings.allowNodeDrainWithLastHealthyReplica
|
||||
label: Allow Node Drain with the Last Healthy Replica
|
||||
description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.
|
||||
If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.mkfsExt4Parameters
|
||||
label: Custom mkfs.ext4 parameters
|
||||
description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
- variable: defaultSettings.disableReplicaRebuild
|
||||
label: Disable Replica Rebuild
|
||||
description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.replicaReplenishmentWaitInterval
|
||||
label: Replica Replenishment Wait Interval
|
||||
description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
|
||||
Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 600
|
||||
- variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
|
||||
label: Concurrent Replica Rebuild Per Node Limit
|
||||
description: "This setting controls how many replicas on a node can be rebuilt simultaneously.
|
||||
Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding.
|
||||
WARNING:
|
||||
- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
|
||||
- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
|
||||
- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 5
|
||||
- variable: defaultSettings.disableRevisionCounter
|
||||
label: Disable Revision Counter
|
||||
description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
- variable: defaultSettings.systemManagedPodsImagePullPolicy
|
||||
label: System Managed Pod Image Pull Policy
|
||||
description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
|
||||
group: "Longhorn Default Settings"
|
||||
type: enum
|
||||
options:
|
||||
- "if-not-present"
|
||||
- "always"
|
||||
- "never"
|
||||
default: "if-not-present"
|
||||
- variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
|
||||
label: Allow Volume Creation with Degraded Availability
|
||||
description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
|
||||
label: Automatically Cleanup System Generated Snapshot
|
||||
description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "true"
|
||||
- variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
|
||||
label: Concurrent Automatic Engine Upgrade Per Node Limit
|
||||
description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 0
|
||||
- variable: defaultSettings.backingImageCleanupWaitInterval
|
||||
label: Backing Image Cleanup Wait Interval
|
||||
description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 60
|
||||
- variable: defaultSettings.backingImageRecoveryWaitInterval
|
||||
label: Backing Image Recovery Wait Interval
|
||||
description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown.
|
||||
WARNING:
|
||||
- This recovery only works for the backing image of which the creation type is \"download\".
|
||||
- File state \"unknown\" means the related manager pods on the pod is not running or the node itself is down/disconnected."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
default: 300
|
||||
- variable: defaultSettings.guaranteedEngineManagerCPU
|
||||
label: Guaranteed Engine Manager CPU
|
||||
description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload.
|
||||
In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
|
||||
Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100.
|
||||
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
|
||||
If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
|
||||
WARNING:
|
||||
- Value 0 means unsetting CPU requests for engine manager pods.
|
||||
- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40.
|
||||
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
|
||||
- This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set.
|
||||
- After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 40
|
||||
default: 12
|
||||
- variable: defaultSettings.guaranteedReplicaManagerCPU
|
||||
label: Guaranteed Replica Manager CPU
|
||||
description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload.
|
||||
In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
|
||||
Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100.
|
||||
The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
|
||||
If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
|
||||
WARNING:
|
||||
- Value 0 means unsetting CPU requests for replica manager pods.
|
||||
- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40.
|
||||
- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
|
||||
- This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set.
|
||||
- After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
|
||||
group: "Longhorn Default Settings"
|
||||
type: int
|
||||
min: 0
|
||||
max: 40
|
||||
default: 12
|
||||
- variable: persistence.defaultClass
|
||||
default: "true"
|
||||
description: "Set as default StorageClass for Longhorn"
|
||||
label: Default Storage Class
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: boolean
|
||||
- variable: persistence.reclaimPolicy
|
||||
label: Storage Class Retain Policy
|
||||
description: "Define reclaim policy (Retain or Delete)"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: enum
|
||||
options:
|
||||
- "Delete"
|
||||
- "Retain"
|
||||
default: "Delete"
|
||||
- variable: persistence.defaultClassReplicaCount
|
||||
description: "Set replica count for Longhorn StorageClass"
|
||||
label: Default Storage Class Replica Count
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: int
|
||||
min: 1
|
||||
max: 10
|
||||
default: 3
|
||||
- variable: persistence.recurringJobSelector.enable
|
||||
description: "Enable recurring job selector for Longhorn StorageClass"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
label: Enable Storage Class Recurring Job Selector
|
||||
type: boolean
|
||||
default: false
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: persistence.recurringJobSelector.jobList
|
||||
description: 'Recurring job selector list for Longhorn StorageClass. Please be careful of quotes of input. e.g., [{"name":"backup", "isGroup":true}]'
|
||||
label: Storage Class Recurring Job Selector List
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: persistence.backingImage.enable
|
||||
description: "Set backing image for Longhorn StorageClass"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
label: Default Storage Class Backing Image
|
||||
type: boolean
|
||||
default: false
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: persistence.backingImage.name
|
||||
description: 'Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If not exists, the backing image data source type and backing image data source parameters should be specified so that Longhorn will create the backing image before using it.'
|
||||
label: Storage Class Backing Image Name
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: persistence.backingImage.expectedChecksum
|
||||
description: 'Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass.
|
||||
WARNING:
|
||||
- If the backing image name is not specified, setting this field is meaningless.
|
||||
- It is not recommended to set this field if the data source type is \"export-from-volume\".'
|
||||
label: Storage Class Backing Image Expected SHA512 Checksum
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: persistence.backingImage.dataSourceType
|
||||
description: 'Specify the data source type for the backing image used in Longhorn StorageClass.
|
||||
If the backing image does not exists, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
|
||||
WARNING:
|
||||
- If the backing image name is not specified, setting this field is meaningless.
|
||||
- As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.'
|
||||
label: Storage Class Backing Image Data Source Type
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: enum
|
||||
options:
|
||||
- ""
|
||||
- "download"
|
||||
- "upload"
|
||||
- "export-from-volume"
|
||||
default: ""
|
||||
- variable: persistence.backingImage.dataSourceParameters
|
||||
description: "Specify the data source parameters for the backing image used in Longhorn StorageClass.
|
||||
If the backing image does not exists, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
|
||||
This option accepts a json string of a map. e.g., '{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'.
|
||||
WARNING:
|
||||
- If the backing image name is not specified, setting this field is meaningless.
|
||||
- Be careful of the quotes here."
|
||||
label: Storage Class Backing Image Data Source Parameters
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: ingress.enabled
|
||||
default: "false"
|
||||
description: "Expose app using Layer 7 Load Balancer - ingress"
|
||||
type: boolean
|
||||
group: "Services and Load Balancing"
|
||||
label: Expose app using Layer 7 Load Balancer
|
||||
show_subquestion_if: true
|
||||
subquestions:
|
||||
- variable: ingress.host
|
||||
default: "xip.io"
|
||||
description: "layer 7 Load Balancer hostname"
|
||||
type: hostname
|
||||
required: true
|
||||
label: Layer 7 Load Balancer Hostname
|
||||
- variable: service.ui.type
|
||||
default: "Rancher-Proxy"
|
||||
description: "Define Longhorn UI service type"
|
||||
type: enum
|
||||
options:
|
||||
- "ClusterIP"
|
||||
- "NodePort"
|
||||
- "LoadBalancer"
|
||||
- "Rancher-Proxy"
|
||||
label: Longhorn UI Service
|
||||
show_if: "ingress.enabled=false"
|
||||
group: "Services and Load Balancing"
|
||||
show_subquestion_if: "NodePort"
|
||||
subquestions:
|
||||
- variable: service.ui.nodePort
|
||||
default: ""
|
||||
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
|
||||
type: int
|
||||
min: 30000
|
||||
max: 32767
|
||||
show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
|
||||
label: UI Service NodePort number
|
||||
- variable: enablePSP
|
||||
default: "true"
|
||||
description: "Setup a pod security policy for Longhorn workloads."
|
||||
label: Pod Security Policy
|
||||
type: boolean
|
||||
group: "Other Settings"
|
|
@ -0,0 +1,5 @@
|
|||
Longhorn is now installed on the cluster!
|
||||
|
||||
Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
|
||||
|
||||
Visit our documentation at https://longhorn.io/docs/
|
|
@ -0,0 +1,66 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "longhorn.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "longhorn.fullname" -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "longhorn.managerIP" -}}
|
||||
{{- $fullname := (include "longhorn.fullname" .) -}}
|
||||
{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "secret" }}
|
||||
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
|
||||
{{- end }}
|
||||
|
||||
{{- /*
|
||||
longhorn.labels generates the standard Helm labels.
|
||||
*/ -}}
|
||||
{{- define "longhorn.labels" -}}
|
||||
app.kubernetes.io/name: {{ template "longhorn.name" . }}
|
||||
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- else -}}
|
||||
{{- "" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "registry_url" -}}
|
||||
{{- if .Values.privateRegistry.registryUrl -}}
|
||||
{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
|
||||
{{- else -}}
|
||||
{{ include "system_default_registry" . }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- /*
|
||||
define the longhorn release namespace
|
||||
*/ -}}
|
||||
{{- define "release_namespace" -}}
|
||||
{{- if .Values.namespaceOverride -}}
|
||||
{{- .Values.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
|
@ -0,0 +1,50 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: longhorn-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- "*"
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets", "statefulsets", "deployments"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["watch", "list"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["longhorn.io"]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
|
||||
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
|
||||
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
|
||||
"recurringjobs", "recurringjobs/status"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: longhorn-bind
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: longhorn-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
|
@ -0,0 +1,125 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-manager
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-manager
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-manager
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- daemon
|
||||
- --engine-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
|
||||
- --instance-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
|
||||
- --share-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
|
||||
- --backing-image-manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --service-account
|
||||
- longhorn-service-account
|
||||
ports:
|
||||
- containerPort: 9500
|
||||
name: manager
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 9500
|
||||
volumeMounts:
|
||||
- name: dev
|
||||
mountPath: /host/dev/
|
||||
- name: proc
|
||||
mountPath: /host/proc/
|
||||
- name: longhorn
|
||||
mountPath: /var/lib/longhorn/
|
||||
mountPropagation: Bidirectional
|
||||
- name: longhorn-default-setting
|
||||
mountPath: /var/lib/longhorn-setting/
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: DEFAULT_SETTING_PATH
|
||||
value: /var/lib/longhorn-setting/default-setting.yaml
|
||||
volumes:
|
||||
- name: dev
|
||||
hostPath:
|
||||
path: /dev/
|
||||
- name: proc
|
||||
hostPath:
|
||||
path: /proc/
|
||||
- name: longhorn
|
||||
hostPath:
|
||||
path: /var/lib/longhorn/
|
||||
- name: longhorn-default-setting
|
||||
configMap:
|
||||
name: longhorn-default-setting
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: "100%"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-manager
|
||||
name: longhorn-backend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.service.manager.type }}
|
||||
sessionAffinity: ClientIP
|
||||
selector:
|
||||
app: longhorn-manager
|
||||
ports:
|
||||
- name: manager
|
||||
port: 9500
|
||||
targetPort: manager
|
||||
{{- if .Values.service.manager.nodePort }}
|
||||
nodePort: {{ .Values.service.manager.nodePort }}
|
||||
{{- end }}
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-default-setting
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
default-setting.yaml: |-
|
||||
backup-target: {{ .Values.defaultSettings.backupTarget }}
|
||||
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
|
||||
allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}
|
||||
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
|
||||
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
|
||||
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
|
||||
replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}
|
||||
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
|
||||
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
|
||||
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
|
||||
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
|
||||
default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}
|
||||
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
|
||||
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
|
||||
taint-toleration: {{ .Values.defaultSettings.taintToleration }}
|
||||
system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }}
|
||||
priority-class: {{ .Values.defaultSettings.priorityClass }}
|
||||
auto-salvage: {{ .Values.defaultSettings.autoSalvage }}
|
||||
auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}
|
||||
disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}
|
||||
replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}
|
||||
node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}
|
||||
allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }}
|
||||
mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }}
|
||||
disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }}
|
||||
replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}
|
||||
concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}
|
||||
disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}
|
||||
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}
|
||||
allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}
|
||||
auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}
|
||||
concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}
|
||||
backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}
|
||||
backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}
|
||||
guaranteed-engine-manager-cpu: {{ .Values.defaultSettings.guaranteedEngineManagerCPU }}
|
||||
guaranteed-replica-manager-cpu: {{ .Values.defaultSettings.guaranteedReplicaManagerCPU }}
|
|
@ -0,0 +1,104 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: longhorn-driver-deployer
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-driver-deployer
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-driver-deployer
|
||||
spec:
|
||||
initContainers:
|
||||
- name: wait-longhorn-manager
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
|
||||
containers:
|
||||
- name: longhorn-driver-deployer
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- deploy-driver
|
||||
- --manager-image
|
||||
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
|
||||
- --manager-url
|
||||
- http://longhorn-backend:9500/v1
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: SERVICE_ACCOUNT
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.serviceAccountName
|
||||
{{- if .Values.csi.kubeletRootDir }}
|
||||
- name: KUBELET_ROOT_DIR
|
||||
value: {{ .Values.csi.kubeletRootDir }}
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
|
||||
- name: CSI_ATTACHER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
|
||||
- name: CSI_PROVISIONER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
|
||||
- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
|
||||
- name: CSI_RESIZER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
|
||||
{{- end }}
|
||||
{{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
|
||||
- name: CSI_SNAPSHOTTER_IMAGE
|
||||
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
|
||||
{{- end }}
|
||||
{{- if .Values.csi.attacherReplicaCount }}
|
||||
- name: CSI_ATTACHER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.attacherReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.provisionerReplicaCount }}
|
||||
- name: CSI_PROVISIONER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.provisionerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.resizerReplicaCount }}
|
||||
- name: CSI_RESIZER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.resizerReplicaCount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.csi.snapshotterReplicaCount }}
|
||||
- name: CSI_SNAPSHOTTER_REPLICA_COUNT
|
||||
value: {{ .Values.csi.snapshotterReplicaCount | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornDriver.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
securityContext:
|
||||
runAsUser: 0
|
|
@ -0,0 +1,72 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
name: longhorn-ui
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: longhorn-ui
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
app: longhorn-ui
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-ui
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
env:
|
||||
- name: LONGHORN_MANAGER_IP
|
||||
value: "http://longhorn-backend:9500"
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornUI.priorityClass | quote}}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornUI.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ui
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
kubernetes.io/cluster-service: "true"
|
||||
{{- end }}
|
||||
name: longhorn-frontend
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
spec:
|
||||
{{- if eq .Values.service.ui.type "Rancher-Proxy" }}
|
||||
type: ClusterIP
|
||||
{{- else }}
|
||||
type: {{ .Values.service.ui.type }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: longhorn-ui
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: http
|
||||
{{- if .Values.service.ui.nodePort }}
|
||||
nodePort: {{ .Values.service.ui.nodePort }}
|
||||
{{- else }}
|
||||
nodePort: null
|
||||
{{- end }}
|
|
@ -0,0 +1,48 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: longhorn-ingress
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
app: longhorn-ingress
|
||||
annotations:
|
||||
{{- if .Values.ingress.tls }}
|
||||
ingress.kubernetes.io/secure-backends: "true"
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.ingress.annotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
ingressClassName: {{ .Values.ingress.ingressClassName }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- host: {{ .Values.ingress.host }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ default "" .Values.ingress.path }}
|
||||
{{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||
pathType: ImplementationSpecific
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
service:
|
||||
name: longhorn-frontend
|
||||
port:
|
||||
number: 80
|
||||
{{- else }}
|
||||
serviceName: longhorn-frontend
|
||||
servicePort: 80
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
- hosts:
|
||||
- {{ .Values.ingress.host }}
|
||||
secretName: {{ .Values.ingress.tlsSecret }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
|
||||
name: longhorn-post-upgrade
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-post-upgrade
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-post-upgrade
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- post-upgrade
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,66 @@
|
|||
{{- if .Values.enablePSP }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: longhorn-psp
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: true
|
||||
allowPrivilegeEscalation: true
|
||||
requiredDropCapabilities:
|
||||
- NET_RAW
|
||||
allowedCapabilities:
|
||||
- SYS_ADMIN
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: true
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- configMap
|
||||
- downwardAPI
|
||||
- emptyDir
|
||||
- secret
|
||||
- projected
|
||||
- hostPath
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: longhorn-psp-role
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
resourceNames:
|
||||
- longhorn-psp
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: longhorn-psp-binding
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: longhorn-psp-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,11 @@
|
|||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Values.privateRegistry.registrySecret }}
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
data:
|
||||
.dockerconfigjson: {{ template "secret" . }}
|
||||
{{- end }}
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: longhorn-service-account
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
name: longhorn-engine-manager
|
||||
namespace: longhorn-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
longhorn.io/component: instance-manager
|
||||
longhorn.io/instance-manager-type: engine
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
name: longhorn-replica-manager
|
||||
namespace: longhorn-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
longhorn.io/component: instance-manager
|
||||
longhorn.io/instance-manager-type: replica
|
|
@ -0,0 +1,34 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: longhorn-storageclass
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
data:
|
||||
storageclass.yaml: |
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: longhorn
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
|
||||
provisioner: driver.longhorn.io
|
||||
allowVolumeExpansion: true
|
||||
reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
|
||||
volumeBindingMode: Immediate
|
||||
parameters:
|
||||
numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
|
||||
staleReplicaTimeout: "30"
|
||||
fromBackup: ""
|
||||
{{- if .Values.persistence.defaultFsType }}
|
||||
fsType: "{{.Values.persistence.defaultFsType}}"
|
||||
{{- end }}
|
||||
{{- if .Values.persistence.backingImage.enable }}
|
||||
backingImage: {{ .Values.persistence.backingImage.name }}
|
||||
backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
|
||||
backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
|
||||
backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
|
||||
{{- end }}
|
||||
{{- if .Values.persistence.recurringJobSelector.enable }}
|
||||
recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
|
||||
{{- end }}
|
|
@ -0,0 +1,16 @@
|
|||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
namespace: {{ include "release_namespace" $ }}
|
||||
labels: {{- include "longhorn.labels" $ | nindent 4 }}
|
||||
app: longhorn
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,49 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
name: longhorn-uninstall
|
||||
namespace: {{ include "release_namespace" . }}
|
||||
labels: {{- include "longhorn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
activeDeadlineSeconds: 900
|
||||
backoffLimit: 1
|
||||
template:
|
||||
metadata:
|
||||
name: longhorn-uninstall
|
||||
labels: {{- include "longhorn.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: longhorn-uninstall
|
||||
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- longhorn-manager
|
||||
- uninstall
|
||||
- --force
|
||||
env:
|
||||
- name: LONGHORN_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.privateRegistry.registrySecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.privateRegistry.registrySecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.priorityClass }}
|
||||
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
|
||||
{{- end }}
|
||||
serviceAccountName: longhorn-service-account
|
||||
{{- if .Values.longhornManager.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.longhornManager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-admin"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
|
||||
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
|
||||
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
|
||||
"recurringjobs", "recurringjobs/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-edit"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
|
||||
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
|
||||
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
|
||||
"recurringjobs", "recurringjobs/status"]
|
||||
verbs: [ "*" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "longhorn-view"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rules:
|
||||
- apiGroups: [ "longhorn.io" ]
|
||||
resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
|
||||
"engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
|
||||
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
|
||||
"backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
|
||||
"backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
|
||||
"recurringjobs", "recurringjobs/status"]
|
||||
verbs: [ "get", "list", "watch" ]
|
|
@ -0,0 +1,28 @@
|
|||
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
|
||||
# {{- $found := dict -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Engine" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Replica" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Setting" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Volume" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Node" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackingImageDataSource" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackupTarget" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/BackupVolume" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/Backup" false -}}
|
||||
# {{- set $found "longhorn.io/v1beta1/RecurringJob" false -}}
|
||||
# {{- range .Capabilities.APIVersions -}}
|
||||
# {{- if hasKey $found (toString .) -}}
|
||||
# {{- set $found (toString .) true -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
# {{- range $_, $exists := $found -}}
|
||||
# {{- if (eq $exists false) -}}
|
||||
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
|
||||
# {{- end -}}
|
||||
# {{- end -}}
|
||||
#{{- end -}}
|
|
@ -0,0 +1,228 @@
|
|||
# Default values for longhorn.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
global:
|
||||
cattle:
|
||||
systemDefaultRegistry: ""
|
||||
|
||||
image:
|
||||
longhorn:
|
||||
engine:
|
||||
repository: rancher/mirrored-longhornio-longhorn-engine
|
||||
tag: v1.2.3
|
||||
manager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-manager
|
||||
tag: v1.2.3
|
||||
ui:
|
||||
repository: rancher/mirrored-longhornio-longhorn-ui
|
||||
tag: v1.2.3
|
||||
instanceManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-instance-manager
|
||||
tag: v1_20211210
|
||||
shareManager:
|
||||
repository: rancher/mirrored-longhornio-longhorn-share-manager
|
||||
tag: v1_20211020
|
||||
backingImageManager:
|
||||
repository: rancher/mirrored-longhornio-backing-image-manager
|
||||
tag: v2_20210820
|
||||
csi:
|
||||
attacher:
|
||||
repository: rancher/mirrored-longhornio-csi-attacher
|
||||
tag: v3.2.1
|
||||
provisioner:
|
||||
repository: rancher/mirrored-longhornio-csi-provisioner
|
||||
tag: v2.1.2
|
||||
nodeDriverRegistrar:
|
||||
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
|
||||
tag: v2.3.0
|
||||
resizer:
|
||||
repository: rancher/mirrored-longhornio-csi-resizer
|
||||
tag: v1.2.0
|
||||
snapshotter:
|
||||
repository: rancher/mirrored-longhornio-csi-snapshotter
|
||||
tag: v3.0.3
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
ui:
|
||||
type: ClusterIP
|
||||
nodePort: null
|
||||
manager:
|
||||
type: ClusterIP
|
||||
nodePort: ""
|
||||
|
||||
persistence:
|
||||
defaultClass: true
|
||||
defaultFsType: ext4
|
||||
defaultClassReplicaCount: 3
|
||||
reclaimPolicy: Delete
|
||||
recurringJobSelector:
|
||||
enable: false
|
||||
jobList: []
|
||||
backingImage:
|
||||
enable: false
|
||||
name: ~
|
||||
dataSourceType: ~
|
||||
dataSourceParameters: ~
|
||||
expectedChecksum: ~
|
||||
|
||||
csi:
|
||||
kubeletRootDir: ~
|
||||
attacherReplicaCount: ~
|
||||
provisionerReplicaCount: ~
|
||||
resizerReplicaCount: ~
|
||||
snapshotterReplicaCount: ~
|
||||
|
||||
defaultSettings:
|
||||
backupTarget: ~
|
||||
backupTargetCredentialSecret: ~
|
||||
allowRecurringJobWhileVolumeDetached: ~
|
||||
createDefaultDiskLabeledNodes: ~
|
||||
defaultDataPath: ~
|
||||
defaultDataLocality: ~
|
||||
replicaSoftAntiAffinity: ~
|
||||
replicaAutoBalance: ~
|
||||
storageOverProvisioningPercentage: ~
|
||||
storageMinimalAvailablePercentage: ~
|
||||
upgradeChecker: ~
|
||||
defaultReplicaCount: ~
|
||||
defaultLonghornStaticStorageClass: ~
|
||||
backupstorePollInterval: ~
|
||||
taintToleration: ~
|
||||
systemManagedComponentsNodeSelector: ~
|
||||
priorityClass: ~
|
||||
autoSalvage: ~
|
||||
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
|
||||
disableSchedulingOnCordonedNode: ~
|
||||
replicaZoneSoftAntiAffinity: ~
|
||||
nodeDownPodDeletionPolicy: ~
|
||||
allowNodeDrainWithLastHealthyReplica: ~
|
||||
mkfsExt4Parameters: ~
|
||||
disableReplicaRebuild: ~
|
||||
replicaReplenishmentWaitInterval: ~
|
||||
concurrentReplicaRebuildPerNodeLimit: ~
|
||||
disableRevisionCounter: ~
|
||||
systemManagedPodsImagePullPolicy: ~
|
||||
allowVolumeCreationWithDegradedAvailability: ~
|
||||
autoCleanupSystemGeneratedSnapshot: ~
|
||||
concurrentAutomaticEngineUpgradePerNodeLimit: ~
|
||||
backingImageCleanupWaitInterval: ~
|
||||
backingImageRecoveryWaitInterval: ~
|
||||
guaranteedEngineManagerCPU: ~
|
||||
guaranteedReplicaManagerCPU: ~
|
||||
privateRegistry:
|
||||
registryUrl: ~
|
||||
registryUser: ~
|
||||
registryPasswd: ~
|
||||
registrySecret: ~
|
||||
|
||||
longhornManager:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
longhornDriver:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
longhornUI:
|
||||
priorityClass: ~
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
#
|
||||
|
||||
ingress:
|
||||
## Set to true to enable ingress record generation
|
||||
enabled: false
|
||||
|
||||
## Add ingressClassName to the Ingress
|
||||
## Can replace the kubernetes.io/ingress.class annotation on v1.18+
|
||||
ingressClassName: ~
|
||||
|
||||
host: xip.io
|
||||
|
||||
## Set this to true in order to enable TLS on the ingress record
|
||||
## A side effect of this will be that the backend service will be connected at port 443
|
||||
tls: false
|
||||
|
||||
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
|
||||
tlsSecret: longhorn.local-tls
|
||||
|
||||
## Ingress annotations done as key:value pairs
|
||||
## If you're using kube-lego, you will want to add:
|
||||
## kubernetes.io/tls-acme: true
|
||||
##
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
|
||||
##
|
||||
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
|
||||
annotations:
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: true
|
||||
|
||||
secrets:
|
||||
## If you're providing your own certificates, please use this to add the certificates as secrets
|
||||
## key and certificate should start with -----BEGIN CERTIFICATE----- or
|
||||
## -----BEGIN RSA PRIVATE KEY-----
|
||||
##
|
||||
## name should line up with a tlsSecret set further up
|
||||
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
|
||||
##
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
# - name: longhorn.local-tls
|
||||
# key:
|
||||
# certificate:
|
||||
|
||||
# Configure a pod security policy in the Longhorn namespace to allow privileged pods
|
||||
enablePSP: true
|
||||
|
||||
## Specify override namespace, specifically this is useful for using longhorn as sub-chart
|
||||
## and its release namespace is not the `longhorn-system`
|
||||
namespaceOverride: ""
|
||||
|
||||
# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
|
||||
annotations: {}
|
|
@ -0,0 +1,23 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/display-name: Alerting Drivers
|
||||
catalog.cattle.io/os: linux
|
||||
catalog.cattle.io/rancher-version: '>= 2.5.0-0 <= 2.5.99-0'
|
||||
catalog.cattle.io/release-name: rancher-alerting-drivers
|
||||
apiVersion: v2
|
||||
appVersion: 1.16.0
|
||||
dependencies:
|
||||
- condition: prom2teams.enabled
|
||||
name: prom2teams
|
||||
repository: file://./charts/prom2teams
|
||||
- condition: sachet.enabled
|
||||
name: sachet
|
||||
repository: file://./charts/sachet
|
||||
description: The manager for third-party webhook receivers used in Prometheus Alertmanager
|
||||
icon: https://charts.rancher.io/assets/logos/alerting-drivers.svg
|
||||
keywords:
|
||||
- monitoring
|
||||
- alertmanger
|
||||
- webhook
|
||||
name: rancher-alerting-drivers
|
||||
version: 1.0.301
|
|
@ -0,0 +1,11 @@
|
|||
# Rancher Alerting Drivers
|
||||
|
||||
This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).
|
||||
|
||||
Those Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.
|
||||
|
||||
Currently, this chart supports the following Drivers:
|
||||
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
|
||||
- SMS, based on [Sachet](https://github.com/messagebird/sachet)
|
||||
|
||||
After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.
|
|
@ -0,0 +1,11 @@
|
|||
# Rancher Alerting Drivers
|
||||
|
||||
This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers).
|
||||
|
||||
Those Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported.
|
||||
|
||||
Currently, this chart supports the following Drivers:
|
||||
- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams)
|
||||
- SMS, based on [Sachet](https://github.com/messagebird/sachet)
|
||||
|
||||
After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options.
|
|
@ -0,0 +1,22 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
|
@ -0,0 +1,10 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/hidden: "true"
|
||||
catalog.cattle.io/os: linux
|
||||
catalog.cattle.io/release-name: rancher-prom2teams
|
||||
apiVersion: v1
|
||||
appVersion: 3.2.3
|
||||
description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams
|
||||
name: prom2teams
|
||||
version: 0.2.2
|
|
@ -0,0 +1,44 @@
|
|||
{%- set
|
||||
theme_colors = {
|
||||
'resolved' : '2DC72D',
|
||||
'critical' : '8C1A1A',
|
||||
'severe' : '8C1A1A',
|
||||
'warning' : 'FF9A0B',
|
||||
'unknown' : 'CCCCCC'
|
||||
}
|
||||
-%}
|
||||
|
||||
{
|
||||
"@type": "MessageCard",
|
||||
"@context": "http://schema.org/extensions",
|
||||
"themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}",
|
||||
"summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}",
|
||||
"title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}",
|
||||
"sections": [{
|
||||
"activityTitle": "{{ msg_text.summary }}",
|
||||
"facts": [{% if msg_text.name %}{
|
||||
"name": "Alert",
|
||||
"value": "{{ msg_text.name }}"
|
||||
},{% endif %}{% if msg_text.instance %}{
|
||||
"name": "In host",
|
||||
"value": "{{ msg_text.instance }}"
|
||||
},{% endif %}{% if msg_text.severity %}{
|
||||
"name": "Severity",
|
||||
"value": "{{ msg_text.severity }}"
|
||||
},{% endif %}{% if msg_text.description %}{
|
||||
"name": "Description",
|
||||
"value": "{{ msg_text.description }}"
|
||||
},{% endif %}{
|
||||
"name": "Status",
|
||||
"value": "{{ msg_text.status }}"
|
||||
}{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{
|
||||
"name": "{{ key }}",
|
||||
"value": "{{ msg_text.extra_labels[key] }}"
|
||||
}{% endfor %}{% endif %}
|
||||
{% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{
|
||||
"name": "{{ key }}",
|
||||
"value": "{{ msg_text.extra_annotations[key] }}"
|
||||
}{% endfor %}{% endif %}],
|
||||
"markdown": true
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
Prom2Teams has been installed. Check its status by running:
|
||||
kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}"
|
|
@ -0,0 +1,73 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
|
||||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Windows cluster will add default taint for linux nodes,
|
||||
add below linux tolerations to workloads could be scheduled to those linux nodes
|
||||
*/}}
|
||||
|
||||
{{- define "linux-node-tolerations" -}}
|
||||
- key: "cattle.io/os"
|
||||
value: "linux"
|
||||
effect: "NoSchedule"
|
||||
operator: "Equal"
|
||||
{{- end -}}
|
||||
|
||||
{{- define "linux-node-selector" -}}
|
||||
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
beta.kubernetes.io/os: linux
|
||||
{{- else -}}
|
||||
kubernetes.io/os: linux
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "prom2teams.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "prom2teams.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "prom2teams.namespace" -}}
|
||||
{{ default .Release.Namespace .Values.global.namespaceOverride }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "prom2teams.labels" -}}
|
||||
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
|
||||
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end -}}
|
|
@ -0,0 +1,39 @@
|
|||
{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}}
|
||||
{{- if not (has .Values.prom2teams.loglevel $valid) -}}
|
||||
{{- fail "Invalid log level"}}
|
||||
{{- end -}}
|
||||
{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}}
|
||||
{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector when prom2teams.connector is set"}}
|
||||
{{- end -}}
|
||||
{{/* Create the configmap when the operation is helm install and the target configmap does not exist. */}}
|
||||
{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
name: {{ include "prom2teams.fullname" . }}
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install, pre-upgrade
|
||||
"helm.sh/hook-weight": "3"
|
||||
"helm.sh/resource-policy": keep
|
||||
data:
|
||||
config.ini: |-
|
||||
[HTTP Server]
|
||||
Host: {{ .Values.prom2teams.host }}
|
||||
Port: {{ .Values.prom2teams.port }}
|
||||
[Microsoft Teams]
|
||||
{{- with .Values.prom2teams.connector }}
|
||||
Connector: {{ . }}
|
||||
{{- end }}
|
||||
{{- range $key, $val := .Values.prom2teams.connectors }}
|
||||
{{ $key }}: {{ $val }}
|
||||
{{- end }}
|
||||
[Group Alerts]
|
||||
Field: {{ .Values.prom2teams.group_alerts_by }}
|
||||
[Log]
|
||||
Level: {{ .Values.prom2teams.loglevel }}
|
||||
[Template]
|
||||
Path: {{ .Values.prom2teams.templatepath }}
|
||||
teams.j2: {{ .Files.Get "files/teams.j2" | quote }}
|
||||
{{- end -}}
|
|
@ -0,0 +1,77 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
serviceAccountName: {{ include "prom2teams.fullname" . }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ include "prom2teams.fullname" . }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8089
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /opt/prom2teams/helmconfig/
|
||||
env:
|
||||
- name: APP_CONFIG_FILE
|
||||
value: {{ .Values.prom2teams.config | quote }}
|
||||
- name: PROM2TEAMS_PORT
|
||||
value: {{ .Values.prom2teams.port | quote }}
|
||||
- name: PROM2TEAMS_HOST
|
||||
value: {{ .Values.prom2teams.ip | quote }}
|
||||
- name: PROM2TEAMS_CONNECTOR
|
||||
value: {{ .Values.prom2teams.connector | quote }}
|
||||
- name: PROM2TEAMS_GROUP_ALERTS_BY
|
||||
value: {{ .Values.prom2teams.group_alerts_by | quote }}
|
||||
resources: {{ toYaml .Values.resources | nindent 12 }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
securityContext:
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: false
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
{{- end }}
|
||||
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
|
||||
{{- if .Values.tolerations }}
|
||||
{{- toYaml .Values.tolerations | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
securityContext:
|
||||
runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}
|
||||
runAsUser: {{ .Values.securityContext.runAsUser }}
|
||||
runAsGroup: {{ .Values.securityContext.runAsGroup }}
|
||||
fsGroup: {{ .Values.securityContext.fsGroup }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}-psp
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'MustRunAsNonRoot'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
volumes:
|
||||
- 'configMap'
|
||||
- 'secret'
|
|
@ -0,0 +1,15 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}-psp
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resourceNames:
|
||||
- {{ include "prom2teams.fullname" . }}-psp
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}-psp
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "prom2teams.fullname" . }}-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "prom2teams.fullname" . }}
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
labels: {{ include "prom2teams.labels" . | nindent 4 }}
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "prom2teams.fullname" . }}
|
||||
namespace: {{ include "prom2teams.namespace" . }}
|
||||
labels:
|
||||
{{ include "prom2teams.labels" . | indent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: 8089
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "prom2teams.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
@ -0,0 +1,62 @@
|
|||
# Default values for prom2teams.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
global:
|
||||
cattle:
|
||||
systemDefaultRegistry: ""
|
||||
namespaceOverride: ""
|
||||
|
||||
nameOverride: "prom2teams"
|
||||
fullnameOverride: ""
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: rancher/mirrored-idealista-prom2teams
|
||||
tag: 3.2.3
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 200Mi
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8089
|
||||
|
||||
prom2teams:
|
||||
host: 0.0.0.0
|
||||
port: 8089
|
||||
connector: the-connector-url
|
||||
connectors: {}
|
||||
# group_alerts_by can be one of
|
||||
# ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url")
|
||||
group_alerts_by:
|
||||
# loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL)
|
||||
loglevel: INFO
|
||||
templatepath: /opt/prom2teams/helmconfig/teams.j2
|
||||
config: /opt/prom2teams/helmconfig/config.ini
|
||||
|
||||
# Security Context properties
|
||||
securityContext:
|
||||
# enabled is a flag to enable Security Context
|
||||
enabled: true
|
||||
# runAsUser is the user ID used to run the container
|
||||
runAsUser: 65534
|
||||
# runAsGroup is the primary group ID used to run all processes within any container of the pod
|
||||
runAsGroup: 65534
|
||||
# fsGroup is the group ID associated with the container
|
||||
fsGroup: 65534
|
||||
# readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the Hazelcast security context
|
||||
readOnlyRootFilesystem: true
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
|
@ -0,0 +1,23 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
|
@ -0,0 +1,11 @@
|
|||
annotations:
|
||||
catalog.cattle.io/certified: rancher
|
||||
catalog.cattle.io/hidden: "true"
|
||||
catalog.cattle.io/os: linux
|
||||
catalog.cattle.io/release-name: rancher-sachet
|
||||
apiVersion: v2
|
||||
appVersion: 0.2.6
|
||||
description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet
|
||||
name: sachet
|
||||
type: application
|
||||
version: 1.0.3
|
|
@ -0,0 +1 @@
|
|||
# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl
|
|
@ -0,0 +1,3 @@
|
|||
rancher-sachet is now installed on the cluster!
|
||||
Please refer to the upstream documentation for configuration options:
|
||||
https://github.com/messagebird/sachet
|
|
@ -0,0 +1,79 @@
|
|||
{{- define "system_default_registry" -}}
|
||||
{{- if .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Windows cluster will add default taint for linux nodes,
|
||||
add below linux tolerations to workloads could be scheduled to those linux nodes
|
||||
*/}}
|
||||
|
||||
{{- define "linux-node-tolerations" -}}
|
||||
- key: "cattle.io/os"
|
||||
value: "linux"
|
||||
effect: "NoSchedule"
|
||||
operator: "Equal"
|
||||
{{- end -}}
|
||||
|
||||
{{- define "linux-node-selector" -}}
|
||||
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
beta.kubernetes.io/os: linux
|
||||
{{- else -}}
|
||||
kubernetes.io/os: linux
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "sachet.namespace" -}}
|
||||
{{ default .Release.Namespace .Values.global.namespaceOverride }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "sachet.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "sachet.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "sachet.labels" -}}
|
||||
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{ include "sachet.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "sachet.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "sachet.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
{{/*This file is applied when the operation is helm install and the target confimap does not exist. */}}
|
||||
{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
namespace: {{ include "sachet.namespace" . }}
|
||||
name: {{ include "sachet.fullname" . }}
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install, pre-upgrade
|
||||
"helm.sh/hook-weight": "3"
|
||||
"helm.sh/resource-policy": keep
|
||||
data:
|
||||
config.yaml: |-
|
||||
{{- if and (not .Values.sachet.providers) (not .Values.sachet.receivers) }}
|
||||
# please refer to the upstream documentation for configuration options:
|
||||
# https://github.com/messagebird/sachet
|
||||
#
|
||||
# providers:
|
||||
# aliyun:
|
||||
# region_id:
|
||||
# ...
|
||||
# receivers:
|
||||
# - name: 'team-sms'
|
||||
# provider: 'aliyu'
|
||||
# ...
|
||||
{{- end }}
|
||||
{{- with .Values.sachet.providers }}
|
||||
providers: {{ toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sachet.receivers }}
|
||||
receivers: {{ toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,75 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "sachet.fullname" . }}
|
||||
namespace: {{ include "sachet.namespace" . }}
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels: {{ include "sachet.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
|
||||
{{- if .Values.tolerations }}
|
||||
{{- toYaml .Values.tolerations | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "sachet.fullname" . }}
|
||||
{{- with .Values.podSecurityContext }}
|
||||
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
|
||||
image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9876
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/live
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: http
|
||||
volumeMounts:
|
||||
- mountPath: /etc/sachet/
|
||||
name: config-volume
|
||||
{{- with .Values.resources }}
|
||||
resources: {{ toYaml .Values.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
- name: config-reloader
|
||||
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
|
||||
image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }}
|
||||
imagePullPolicy: {{ .Values.configReloader.pullPolicy }}
|
||||
args:
|
||||
- -volume-dir=/watch-config
|
||||
- -webhook-method=POST
|
||||
- -webhook-status-code=200
|
||||
- -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
|
||||
volumeMounts:
|
||||
- mountPath: /watch-config
|
||||
name: config-volume
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: {{ include "sachet.fullname" . }}
|
||||
defaultMode: 0777
|
|
@ -0,0 +1,29 @@
|
|||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ include "sachet.fullname" . }}-psp
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'MustRunAsNonRoot'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
volumes:
|
||||
- 'configMap'
|
||||
- 'secret'
|
|
@ -0,0 +1,15 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "sachet.fullname" . }}-psp
|
||||
namespace: {{ include "sachet.namespace" . }}
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resourceNames:
|
||||
- {{ include "sachet.fullname" . }}-psp
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "sachet.fullname" . }}-psp
|
||||
namespace: {{ include "sachet.namespace" . }}
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "sachet.fullname" . }}-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "sachet.fullname" . }}
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "sachet.fullname" . }}
|
||||
namespace: {{ include "sachet.namespace" . }}
|
||||
labels: {{ include "sachet.labels" . | nindent 4 }}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue