[dev-v2.9] Forward port from dev-v2.8 after release 2.8.7 (#4370)

pull/4373/head
Nicholas (openSUSE Software Engineer) authored 2024-08-20 19:49:43 -03:00, committed by GitHub
parent 2ca59be5ba
commit c430aa2e2d
274 changed files with 91033 additions and 39 deletions

.gitignore

@ -1,3 +1,4 @@
state.json
logs/*
bin
*.DS_Store

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/release-name: elemental-operator-crds
apiVersion: v2
appVersion: 1.5.4
description: A Helm chart for deploying Rancher Elemental Operator CRDs
name: elemental-crd
type: application
version: 103.3.1+up1.5.4

File diff suppressed because it is too large

@ -0,0 +1,6 @@
{{- $inventoryCRD := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" .Release.Namespace "machineinventories.elemental.cattle.io" -}}
{{- if $inventoryCRD -}}
{{- if $inventoryCRD.metadata.deletionTimestamp -}}
{{- required "CRDs from previous installations are pending to be removed (deletionTimestamp is set). Fully deleting them before (re-)installing is required" "" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: elemental-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Elemental
catalog.cattle.io/kube-version: '>= 1.23.0-0'
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/provides-gvr: elemental.cattle.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: elemental-operator
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.5.4
apiVersion: v2
appVersion: 1.5.4
description: Elemental provides Cloud Native OS Management for Cluster Nodes.
icon: https://raw.githubusercontent.com/rancher/elemental/main/logo/icon-elemental.svg
name: elemental
version: 103.3.1+up1.5.4

@ -0,0 +1,5 @@
# Elemental Operator Helm Chart
This chart bootstraps an elemental-operator deployment on a [Rancher Manager](https://rancher.com/docs/rancher/) cluster using the [Helm](https://helm.sh) package manager.
Check out the [Elemental Operator Helm Chart documentation](https://elemental.docs.rancher.com/elementaloperatorchart-reference/) in the official [Elemental guide](https://elemental.docs.rancher.com/).
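For a manual install outside the Rancher UI, the CRD chart has to be applied before the operator chart (the `validate-install-crd.yaml` template in this chart refuses to install when the Elemental CRDs are missing). A minimal sketch, assuming the `elemental-crd` and `elemental` chart directories from this changeset are available under illustrative local paths, and using the release names and namespace from the Chart.yaml annotations:
```
# Install the CRD chart first, then the operator chart (local paths are placeholders).
helm upgrade --install --create-namespace -n cattle-elemental-system \
  elemental-operator-crds ./elemental-crd
helm upgrade --install --create-namespace -n cattle-elemental-system \
  elemental-operator ./elemental
```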

@ -0,0 +1,5 @@
# Elemental
Elemental brings to Rancher the ability to install and manage the OS of bare metal and virtualized machines.
For more information on how to deploy an Elemental Cluster, follow the [official documentation](https://elemental.docs.rancher.com/).

@ -0,0 +1,27 @@
questions:
- variable: channel.defaultChannel
default: "true"
description: "Provide an Elemental OS Channel container image"
label: Elemental OS Channel
type: boolean
show_subquestion_if: true
group: "Elemental OS Channel"
subquestions:
- variable: channel.image
default: "registry.suse.com/rancher/elemental-channel"
description: "Specify the Elemental OS channel: for air-gapped scenarios you need to provide your own OS channel image (see https://elemental.docs.rancher.com/airgap for detailed instructions)"
type: string
label: Elemental OS Channel Image
group: "Elemental OS Channel"
- variable: channel.tag
default: "1.5.4"
description: "Specify Elemental OS channel image tag"
type: string
label: "Elemental OS Channel Tag"
group: "Elemental OS Channel"
- variable: debug
default: "false"
description: "Enable debug logging in the Elemental operator"
type: boolean
label: "Enable Debug Logging"
group: "Logging"

@ -0,0 +1,17 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{ include "system_default_registry" . }}
{{- else if .Values.registryUrl -}}
{{- printf "%s/" .Values.registryUrl -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,9 @@
kind: APIService
apiVersion: management.cattle.io/v3
metadata:
name: {{ .Release.Name }}
spec:
secretName: elemental-operator
secretNamespace: {{ .Release.Namespace }}
pathPrefixes:
- /elemental/

@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elemental-capi-role
labels:
cluster.x-k8s.io/aggregate-to-manager: "true"
rules:
- apiGroups: ["elemental.cattle.io"]
resources: ["*"]
verbs: ["*"]

@ -0,0 +1,11 @@
{{ if and .Values.channel .Values.channel.image .Values.channel.tag }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: elemental-channel
namespace: fleet-default
spec:
options:
image: {{ .Values.channel.image }}:{{ .Values.channel.tag }}
type: custom
{{ end }}

@ -0,0 +1,267 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: '{{ .Release.Name }}'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
verbs:
- get
- list
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- seedimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- seedimages/status
verbs:
- get
- patch
- update
- apiGroups:
- fleet.cattle.io
resources:
- bundles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- management.cattle.io
resources:
- settings
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- create
- delete
- list
- watch

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{.Release.Namespace}}

@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: elemental-operator
template:
metadata:
labels:
app: elemental-operator
spec:
containers:
- env:
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
{{- end }}
name: {{ .Release.Name }}
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
image: {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
args:
- operator
{{- if .Values.debug }}
- --v=5
- --debug
{{- end }}
- --namespace
- {{ .Release.Namespace }}
- --operator-image
- {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
- --seedimage-image
- {{ template "registry_url" . }}{{ .Values.seedImage.repository }}:{{ .Values.seedImage.tag | default .Chart.AppVersion }}
- --seedimage-image-pullpolicy
- {{ .Values.seedImage.imagePullPolicy}}
serviceAccountName: {{ .Release.Name }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}

@ -0,0 +1,16 @@
apiVersion: management.cattle.io/v3
builtin: false
description: "Elemental Administrator Role"
displayName: Elemental Administrator
kind: GlobalRole
metadata:
labels:
cattle.io/creator: norman
name: {{ .Release.Name }}
rules:
- apiGroups:
- elemental.cattle.io
resources:
- '*'
verbs:
- '*'

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}

@ -0,0 +1,25 @@
{{ if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 }}
{{ $apis := dict
"elemental.cattle.io/v1beta1/MachineInventory" "machineinventories"
"elemental.cattle.io/v1beta1/MachineInventorySelector" "machineinventoryselectors"
"elemental.cattle.io/v1beta1/MachineInventorySelectorTemplate" "machineinventoryselectortemplates"
"elemental.cattle.io/v1beta1/MachineRegistration" "machineregistrations"
"elemental.cattle.io/v1beta1/ManagedOSImage" "managedosimages"
"elemental.cattle.io/v1beta1/ManagedOSVersionChannel" "managedosversionchannels"
"elemental.cattle.io/v1beta1/ManagedOSVersion" "managedosversions"
"elemental.cattle.io/v1beta1/SeedImage" "seedimages"
}}
{{- range $api, $crd := $apis -}}
{{- if not ($.Capabilities.APIVersions.Has $api) -}}
{{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
{{- end -}}
{{- $crdobj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" (print $crd ".elemental.cattle.io") -}}
{{- if not $crdobj -}}
{{- print "Cannot lookup " $crd ".elemental.cattle.io crd object" | fail -}}
{{- end -}}
{{- $crdrelease := index $crdobj.metadata.annotations "meta.helm.sh/release-name" -}}
{{- if eq $crdrelease $.Release.Name -}}
{{- required "Elemental CRDs should be moved to the new elemental-operator-crds chart before upgrading this operator." "" -}}
{{- end -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,42 @@
image:
empty: rancher/pause:3.1
repository: "rancher/mirrored-elemental-operator"
tag: "1.5.4"
imagePullPolicy: IfNotPresent
seedImage:
repository: "rancher/mirrored-elemental-seedimage-builder"
tag: "1.5.4"
imagePullPolicy: IfNotPresent
channel:
image: "registry.suse.com/rancher/elemental-channel"
tag: "1.5.4"
# number of operator replicas to deploy
replicas: 1
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
# used only if systemDefaultRegistry is empty
registryUrl: ""
# enable debug output for operator
debug: false
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule

@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.9.8
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 103.1.7+up0.9.8

@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
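For standalone, agent-initiated registration the chart must be installed as the release `fleet-agent` in the `cattle-fleet-system` namespace (the validate.yaml template in this chart enforces both), with the registration values from values.yaml filled in. A minimal sketch, assuming the Helm repository named in the Fleet chart README later in this changeset and placeholder registration values:
```
helm repo add fleet https://rancher.github.io/fleet-helm-charts/
helm -n cattle-fleet-system install --create-namespace --wait fleet-agent fleet/fleet-agent \
  --set apiServerURL="https://example.com:6443" \
  --set-file apiServerCA=ca.pem \
  --set token="<registration-token>" \
  --set clusterNamespace="<cluster-namespace>"
```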

@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,13 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}",
"agentTLSMode": "{{.Values.agentTLSMode}}"
}

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-agent
spec:
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
serviceAccountName: fleet-agent
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@ -0,0 +1,28 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed with the release name %s" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@ -0,0 +1,67 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.9.8
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded value of the CA of the Kubernetes API server running the Fleet Manager.
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# The cluster registration value
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
# The namespace containing the cluster registration secrets
# The namespace containing the clusters registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.21.5
debug: false
debugLevel: 0

@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.9.8
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 103.1.7+up0.9.8

@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
The Fleet Manager CustomResourceDefinitions Helm chart is a prerequisite for the Fleet Helm chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1 @@
# This file is intentionally empty

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.9.8
dependencies:
- condition: gitops.enabled
name: gitjob
repository: file://./charts/gitjob
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 103.1.7+up0.9.8

@ -0,0 +1,30 @@
# Fleet Helm Chart
Fleet is GitOps at scale. Fleet is designed to manage multiple clusters.
## What is Fleet?
* Cluster engine: Fleet is a container management and deployment engine designed to offer users more control of the local cluster and constant monitoring through GitOps. Fleet focuses not only on the ability to scale, but it also gives users a high degree of control and visibility to monitor exactly what is installed on the cluster.
* Deployment management: Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy all resources in the cluster. As a result, users can enjoy a high degree of control, consistency, and auditability of their clusters.
## Introduction
This chart deploys Fleet on a Kubernetes cluster. It also deploys some of its dependencies as subcharts.
The documentation is centralized in the [doc website](https://fleet.rancher.io/).
## Prerequisites
Get Helm if you don't have it. Helm 3 is just a CLI.
## Install Fleet
Install the Fleet Helm charts (there are two because we separate out CRDs for ultimate flexibility):
```
$ helm repo add fleet https://rancher.github.io/fleet-helm-charts/
$ helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
$ helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```
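Once Fleet is running, deployments are driven by `GitRepo` resources. A hypothetical minimal example, assuming the `fleet.cattle.io/v1alpha1` API this chart provides and reusing the `repo`/`branch`/`paths` fields that also appear under `bootstrap` in values.yaml (the repository URL and path are placeholders):
```
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: example            # placeholder name
  namespace: fleet-local   # namespace for the local cluster, from bootstrap.namespace in values.yaml
spec:
  repo: https://github.com/example/fleet-examples   # placeholder repository
  branch: master
  paths:
    - simple               # placeholder path within the repository
```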

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@ -0,0 +1,5 @@
apiVersion: v2
appVersion: 0.9.13
description: Controller that runs jobs based on git events
name: gitjob
version: 0.9.13

@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gitjob
rules:
- apiGroups:
- "batch"
resources:
- 'jobs'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'pods'
verbs:
- 'list'
- 'get'
- 'watch'
- apiGroups:
- ""
resources:
- 'secrets'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'configmaps'
verbs:
- '*'
- apiGroups:
- "gitjob.cattle.io"
resources:
- "gitjobs"
- "gitjobs/status"
verbs:
- "*"

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitjob-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
namespace: {{ .Release.Namespace }}

@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitjob
spec:
selector:
matchLabels:
app: "gitjob"
template:
metadata:
labels:
app: "gitjob"
spec:
serviceAccountName: gitjob
containers:
- image: "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
name: gitjob
args:
- gitjob
- --gitjob-image
- "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
{{- if .Values.debug }}
- --debug
{{- end }}
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}

@ -0,0 +1,23 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitjob
rules:
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitjob
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: gitjob
spec:
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: 8080
selector:
app: "gitjob"

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitjob

@ -0,0 +1,27 @@
gitjob:
repository: rancher/gitjob
tag: v0.9.13
global:
cattle:
systemDefaultRegistry: ""
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
# PriorityClassName assigned to deployment.
priorityClassName: ""
debug: false

@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fleet-controller
data:
config: |
{
"systemDefaultRegistry": "{{ template "system_default_registry" . }}",
"agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
"agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
"apiServerURL": "{{.Values.apiServerURL}}",
"apiServerCA": "{{b64enc .Values.apiServerCA}}",
"agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
"agentTLSMode": "{{.Values.agentTLSMode}}",
"ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
"bootstrap": {
"paths": "{{.Values.bootstrap.paths}}",
"repo": "{{.Values.bootstrap.repo}}",
"secret": "{{.Values.bootstrap.secret}}",
"branch": "{{.Values.bootstrap.branch}}",
"namespace": "{{.Values.bootstrap.namespace}}",
"agentNamespace": "{{.Values.bootstrap.agentNamespace}}",
},
"webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
"githubURLPrefix": "{{.Values.githubURLPrefix}}"
}

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-controller
spec:
selector:
matchLabels:
app: fleet-controller
template:
metadata:
labels:
app: fleet-controller
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS
value: {{ quote .Values.propagateDebugSettingsToAgents }}
{{- if .Values.clusterEnqueueDelay }}
- name: FLEET_CLUSTER_ENQUEUE_DELAY
value: {{ .Values.clusterEnqueueDelay }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_DIR
value: /tmp/pprof/
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_PERIOD
value: {{ quote .Values.cpuPprof.period }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
name: fleet-controller
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
command:
- fleetcontroller
{{- if not .Values.gitops.enabled }}
- --disable-gitops
{{- end }}
{{- if not .Values.bootstrap.enabled }}
- --disable-bootstrap
{{- end }}
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
{{- if .Values.cpuPprof }}
- mountPath: /tmp/pprof
name: pprof
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- if .Values.cpuPprof }}
- name: pprof {{ toYaml .Values.cpuPprof.volumeConfiguration | nindent 10 }}
{{- end }}
serviceAccountName: fleet-controller
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@ -0,0 +1,40 @@
{{- if .Values.migrations.clusterRegistrationCleanup }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: fleet-cleanup-clusterregistrations
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: fleet-controller
restartPolicy: Never
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
privileged: false
command:
- fleet
args:
- cleanup
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
backoffLimit: 1
{{- end }}

@ -0,0 +1,114 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller
rules:
- apiGroups:
- gitjob.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- fleet.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: fleet-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller-bootstrap
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
name: fleet-controller-bootstrap
namespace: {{.Release.Namespace}}
{{- end }}

@ -0,0 +1,12 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller-bootstrap
{{- end }}

@ -0,0 +1,87 @@
image:
repository: rancher/fleet
tag: v0.9.8
imagePullPolicy: IfNotPresent
agentImage:
repository: rancher/fleet-agent
tag: v0.9.8
imagePullPolicy: IfNotPresent
# For cluster registration the public URL of the Kubernetes API server must be set here
# Example: https://example.com:6443
apiServerURL: ""
# For cluster registration the pem encoded value of the CA of the Kubernetes API server must be set here
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"
# Whether to allow clusters to specify their own labels upon registration.
ignoreClusterRegistrationLabels: false
# Counts from the GitRepo can be out of sync with the bundleDeployment state.
# Fleet simply retries after a number of seconds, as there is no good way to trigger an event that doesn't cause a loop.
# If not set, the default is 15 seconds.
# clusterEnqueueDelay: 120s
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
bootstrap:
enabled: true
# The namespace that will be autocreated and the local cluster will be registered in
namespace: fleet-local
# The namespace where the fleet agent for the local cluster will run; if empty
# this will default to cattle-fleet-system
agentNamespace: ""
# A repo to add at install time that will deploy to the local cluster. This allows
# one to fully bootstrap fleet, its configuration and all its downstream clusters
# in one shot.
repo: ""
secret: ""
branch: master
paths: ""
global:
cattle:
systemDefaultRegistry: ""
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
gitops:
enabled: true
debug: false
debugLevel: 0
propagateDebugSettingsToAgents: true
## Optional CPU pprof configuration. Profiles are collected continuously and saved every period
## Any valid volume configuration can be provided, the example below uses hostPath
#cpuPprof:
# period: "60s"
# volumeConfiguration:
# hostPath:
# path: /tmp/pprof
# type: DirectoryOrCreate
migrations:
clusterRegistrationCleanup: true

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester CSI Driver
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: harvester-csi-driver
catalog.cattle.io/ui-component: harvester-csi-driver
catalog.cattle.io/upstream-version: 0.1.18
apiVersion: v2
appVersion: v0.1.7
description: A Helm chart for Harvester CSI driver
keywords:
- infrastructure
- harvester
maintainers:
- name: harvester
name: harvester-csi-driver
type: application
version: 103.0.2+up0.1.18

@ -0,0 +1,11 @@
categories:
- infrastructure
- harvester
namespace: kube-system
questions:
- variable: cloudConfig.hostPath
label: Cloud config file path
description: "Specify the path of the cloud config."
group: "Default"
type: string
default: "/etc/kubernetes/cloud-config"

@ -0,0 +1 @@
Successfully deployed Harvester CSI driver to the {{ .Release.Namespace }} namespace.

@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "harvester-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "harvester-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "harvester-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "harvester-csi-driver.labels" -}}
helm.sh/chart: {{ include "harvester-csi-driver.chart" . }}
{{ include "harvester-csi-driver.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "harvester-csi-driver.selectorLabels" -}}
app.kubernetes.io/name: {{ include "harvester-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Global system default registry
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: driver.harvesterhci.io
spec:
attachRequired: true
fsGroupPolicy: ReadWriteOnceWithFSType
podInfoOnMount: true
volumeLifecycleModes:
- Persistent

@ -0,0 +1,149 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
component: csi-driver
{{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
component: csi-driver
{{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
spec:
containers:
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --kubelet-registration-path={{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io/csi.sock
env:
- name: ADDRESS
value: /csi/csi.sock
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -rf /registration/driver.harvesterhci.io-reg.sock /csi//*
name: node-driver-registrar
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi/
name: socket-dir
- mountPath: /registration
name: registration-dir
- args:
- --nodeid=$(NODE_ID)
- --endpoint=$(CSI_ENDPOINT)
- --kubeconfig=/var/lib/harvester/cloud-provider-config
env:
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
image: {{ template "system_default_registry" . }}{{ .Values.image.harvester.csiDriver.repository }}:{{ .Values.image.harvester.csiDriver.tag | default .Chart.AppVersion }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -f /csi//*
name: harvester-csi-driver
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- SYS_ADMIN
privileged: true
volumeMounts:
- name: cloud-config
readOnly: true
mountPath: /var/lib/harvester
- name: kubernetes
readOnly: true
mountPath: /etc/kubernetes
- mountPath: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
mountPropagation: Bidirectional
name: kubernetes-csi-dir
- mountPath: /csi/
name: socket-dir
- mountPath: {{ .Values.kubeletRootDir }}/pods
mountPropagation: Bidirectional
name: pods-mount-dir
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /rootfs
mountPropagation: Bidirectional
name: host
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostPID: true
serviceAccountName: {{ include "harvester-csi-driver.name" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: cloud-config
{{- if .Values.cloudConfig.secretName }}
secret:
secretName: {{ .Values.cloudConfig.secretName }}
{{- else }}
hostPath:
path: {{ .Values.cloudConfig.hostPath }}
type: DirectoryOrCreate
{{- end }}
- hostPath:
path: /etc/kubernetes
type: DirectoryOrCreate
name: kubernetes
- hostPath:
path: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
type: DirectoryOrCreate
name: kubernetes-csi-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/pods
type: DirectoryOrCreate
name: pods-mount-dir
- hostPath:
path: /dev
name: host-dev
- hostPath:
path: /sys
name: host-sys
- hostPath:
path: /
name: host
- hostPath:
path: /lib/modules
name: lib-modules

@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "harvester-csi-driver.name" . }}-controllers
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicasCount }}
selector:
matchLabels:
component: csi-controllers
{{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
component: csi-controllers
{{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
spec:
containers:
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-resizer
volumeMounts:
- mountPath: /csi/
name: socket-dir
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
- --default-fstype=ext4
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-provisioner
volumeMounts:
- mountPath: /csi/
name: socket-dir
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-attacher
volumeMounts:
- mountPath: /csi/
name: socket-dir
serviceAccountName: {{ include "harvester-csi-driver.name" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- hostPath:
path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
type: DirectoryOrCreate
name: socket-dir

@ -0,0 +1,75 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "harvester-csi-driver.name" . }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "harvester-csi-driver.name" . }}
subjects:
- kind: ServiceAccount
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "harvester-csi-driver.name" . }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [ "coordination.k8s.io" ]
resources: [ "leases" ]
verbs: [ "get", "watch", "list", "delete", "update", "create" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csistoragecapacities" ]
verbs: [ "get", "list", "watch", "create", "update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: [ "get", "list", "watch", "create","update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create","update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csinodes" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "list", "watch", "create", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "apps" ]
resources: [ "replicasets" ]
verbs: [ "get" ]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "volumeattachments/status" ]
verbs: [ "patch" ]

@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: harvester
annotations:
storageclass.kubernetes.io/is-default-class: "true"
allowVolumeExpansion: true
provisioner: driver.harvesterhci.io
reclaimPolicy: Delete
volumeBindingMode: Immediate

@ -0,0 +1,54 @@
# Default values for harvester-csi-driver.
replicasCount: 3
image:
harvester:
csiDriver:
repository: rancher/harvester-csi-driver
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.1.7"
csi:
nodeDriverRegistrar:
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
tag: v2.3.0
resizer:
repository: rancher/mirrored-longhornio-csi-resizer
tag: v1.2.0
provisioner:
repository: rancher/mirrored-longhornio-csi-provisioner
tag: v2.1.2
attacher:
repository: rancher/mirrored-longhornio-csi-attacher
tag: v3.2.1
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
kubeletRootDir: /var/lib/kubelet
cloudConfig:
secretName: ""
hostPath: "/var/lib/rancher/rke2/etc/config-files/"
nodeSelector:
kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
key: kubevirt.io/drain
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
- effect: NoExecute
key: node-role.kubernetes.io/etcd
operator: Equal
- key: cattle.io/os
operator: Equal
value: "linux"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""

@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-aks-operator-crd
apiVersion: v2
appVersion: 1.2.4
description: AKS Operator CustomResourceDefinitions
name: rancher-aks-operator-crd
version: 103.4.0+up1.2.4

@ -0,0 +1,211 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
name: aksclusterconfigs.aks.cattle.io
spec:
group: aks.cattle.io
names:
kind: AKSClusterConfig
plural: aksclusterconfigs
shortNames:
- akscc
singular: aksclusterconfig
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
authBaseUrl:
nullable: true
type: string
authorizedIpRanges:
items:
nullable: true
type: string
nullable: true
type: array
azureCredentialSecret:
nullable: true
type: string
baseUrl:
nullable: true
type: string
clusterName:
nullable: true
type: string
dnsPrefix:
nullable: true
type: string
dnsServiceIp:
nullable: true
type: string
dockerBridgeCidr:
nullable: true
type: string
httpApplicationRouting:
nullable: true
type: boolean
imported:
type: boolean
kubernetesVersion:
nullable: true
type: string
linuxAdminUsername:
nullable: true
type: string
loadBalancerSku:
nullable: true
type: string
logAnalyticsWorkspaceGroup:
nullable: true
type: string
logAnalyticsWorkspaceName:
nullable: true
type: string
managedIdentity:
nullable: true
type: boolean
monitoring:
nullable: true
type: boolean
networkPlugin:
nullable: true
type: string
networkPolicy:
nullable: true
type: string
nodePools:
items:
properties:
availabilityZones:
items:
nullable: true
type: string
nullable: true
type: array
count:
nullable: true
type: integer
enableAutoScaling:
nullable: true
type: boolean
maxCount:
nullable: true
type: integer
maxPods:
nullable: true
type: integer
maxSurge:
nullable: true
type: string
minCount:
nullable: true
type: integer
mode:
nullable: true
type: string
name:
nullable: true
type: string
nodeLabels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
nodeTaints:
items:
nullable: true
type: string
nullable: true
type: array
orchestratorVersion:
nullable: true
type: string
osDiskSizeGB:
nullable: true
type: integer
osDiskType:
nullable: true
type: string
osType:
nullable: true
type: string
vmSize:
nullable: true
type: string
vnetSubnetID:
nullable: true
type: string
type: object
nullable: true
type: array
nodeResourceGroup:
nullable: true
type: string
outboundType:
nullable: true
type: string
podCidr:
nullable: true
type: string
privateCluster:
nullable: true
type: boolean
privateDnsZone:
nullable: true
type: string
resourceGroup:
nullable: true
type: string
resourceLocation:
nullable: true
type: string
serviceCidr:
nullable: true
type: string
sshPublicKey:
nullable: true
type: string
subnet:
nullable: true
type: string
tags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
userAssignedIdentity:
nullable: true
type: string
virtualNetwork:
nullable: true
type: string
virtualNetworkResourceGroup:
nullable: true
type: string
type: object
status:
properties:
failureMessage:
nullable: true
type: string
phase:
nullable: true
type: string
rbacEnabled:
nullable: true
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-aks-operator-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: aksclusterconfigs.aks.cattle.io/v1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-aks-operator
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.2.4
description: A Helm chart for provisioning AKS clusters
home: https://github.com/rancher/aks-operator
name: rancher-aks-operator
sources:
- https://github.com/rancher/aks-operator
version: 103.4.0+up1.2.4

@ -0,0 +1,4 @@
You have deployed the Rancher AKS operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions AKS clusters
from AKSClusterConfig CRs.
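For reference, a hypothetical minimal AKSClusterConfig sketch, using only field names from the aksclusterconfigs.aks.cattle.io CRD in the companion CRD chart earlier in this changeset (all values below are placeholders; in a Rancher setup these CRs are normally created by Rancher itself):
```
apiVersion: aks.cattle.io/v1
kind: AKSClusterConfig
metadata:
  name: example-aks          # placeholder; the CRD is namespaced
  namespace: cattle-system   # illustrative namespace
spec:
  clusterName: example-aks
  azureCredentialSecret: example-azure-credential   # placeholder credential secret reference
  resourceGroup: example-rg
  resourceLocation: eastus
  kubernetesVersion: "1.27.7"                       # placeholder version
  imported: false
  nodePools:
    - name: agentpool
      count: 1
      vmSize: Standard_DS2_v2
      mode: System
```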

@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aks-operator
namespace: cattle-system
rules:
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list', 'create', 'watch', 'update']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs']
verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs/status']
verbs: ['update']

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aks-operator
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aks-operator
subjects:
- kind: ServiceAccount
name: aks-operator
namespace: cattle-system

@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aks-config-operator
namespace: cattle-system
spec:
replicas: 1
selector:
matchLabels:
ke.cattle.io/operator: aks
template:
metadata:
labels:
ke.cattle.io/operator: aks
spec:
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
serviceAccountName: aks-operator
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
securityContext:
fsGroup: 1007
runAsUser: 1007
containers:
- name: aks-operator
image: '{{ template "system_default_registry" $ }}{{ $.Values.aksOperator.image.repository }}:{{ $.Values.aksOperator.image.tag }}'
imagePullPolicy: IfNotPresent
env:
- name: HTTP_PROXY
value: {{ .Values.httpProxy }}
- name: HTTPS_PROXY
value: {{ .Values.httpsProxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.additionalTrustedCAs }}
# aks-operator mounts the additional CAs in two places:
volumeMounts:
# This directory is owned by the aks-operator user so c_rehash works here.
- mountPath: /etc/rancher/ssl/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
# This directory is root-owned so c_rehash doesn't work here,
# but the cert is here in case update-ca-certificates is called in the future or by the OS.
- mountPath: /etc/pki/trust/anchors/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 0400
secretName: tls-ca-additional
{{- end }}
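
When `additionalTrustedCAs` is enabled, the deployment above mounts a secret named `tls-ca-additional` with a `ca-additional.pem` key from the release namespace. A minimal sketch of such a secret, with the PEM bundle left as a placeholder you would supply yourself:

apiVersion: v1
kind: Secret
metadata:
  name: tls-ca-additional
  namespace: cattle-system
type: Opaque
stringData:
  # Placeholder: replace with the actual CA bundle in PEM format.
  ca-additional.pem: |
    -----BEGIN CERTIFICATE-----
    ...placeholder certificate body...
    -----END CERTIFICATE-----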

View File

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: cattle-system
name: aks-operator

View File

@ -0,0 +1,23 @@
global:
cattle:
systemDefaultRegistry: ""
aksOperator:
image:
repository: rancher/aks-operator
tag: v1.2.4
httpProxy: ""
httpsProxy: ""
noProxy: ""
additionalTrustedCAs: false
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
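
For reference only, a hypothetical override file for a proxied installation pulling from a private registry might look like the sketch below; every value is a placeholder, not a default of this chart:

# values-override.yaml (hypothetical example)
global:
  cattle:
    systemDefaultRegistry: "registry.example.com"
httpProxy: "http://proxy.example.com:3128"
httpsProxy: "http://proxy.example.com:3128"
noProxy: "127.0.0.0/8,10.0.0.0/8,cattle-system.svc,.svc,.cluster.local"
additionalTrustedCAs: true
nodeSelector:
  kubernetes.io/os: linux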

View File

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: rancher-backup-crd
apiVersion: v2
appVersion: 4.0.3
description: Installs the CRDs for rancher-backup.
name: rancher-backup-crd
type: application
version: 103.0.3+up4.0.3

View File

@ -0,0 +1,3 @@
# Rancher Backup CRD
A Rancher chart that installs the CRDs used by `rancher-backup`.

View File

@ -0,0 +1,141 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: backups.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Backup
plural: backups
singular: backup
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.storageLocation
name: Location
type: string
- jsonPath: .status.backupType
name: Type
type: string
- jsonPath: .status.filename
name: Latest-Backup
type: string
- jsonPath: .spec.resourceSetName
name: ResourceSet
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
encryptionConfigSecretName:
description: Name of the Secret containing the encryption config
nullable: true
type: string
resourceSetName:
description: Name of the ResourceSet CR to use for backup
nullable: true
type: string
retentionCount:
minimum: 1
type: integer
schedule:
description: Cron schedule for recurring backups
example:
Descriptors: '@midnight'
Standard crontab specs: 0 0 * * *
nullable: true
type: string
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- resourceSetName
type: object
status:
properties:
backupType:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
filename:
nullable: true
type: string
lastSnapshotTs:
nullable: true
type: string
nextSnapshotAt:
nullable: true
type: string
observedGeneration:
type: integer
storageLocation:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
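
As a quick illustration of the Backup schema above, a hypothetical recurring S3 backup could look like the sketch below; the resource set, bucket, and credential names are placeholders rather than values shipped with the chart (Backup is cluster-scoped, so its metadata has no namespace):

apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-backup                       # hypothetical name
spec:
  resourceSetName: rancher-resource-set      # placeholder; required by the schema
  schedule: "@midnight"
  retentionCount: 3
  storageLocation:
    s3:
      bucketName: rancher-backups            # placeholder
      credentialSecretName: s3-creds         # placeholder
      credentialSecretNamespace: cattle-resources-system
      folder: rancher                        # placeholder
      region: us-east-1                      # placeholder
      endpoint: s3.us-east-1.amazonaws.com   # placeholder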

View File

@ -0,0 +1,118 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: resourcesets.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: ResourceSet
plural: resourcesets
singular: resourceset
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
controllerReferences:
items:
properties:
apiVersion:
nullable: true
type: string
name:
nullable: true
type: string
namespace:
nullable: true
type: string
replicas:
type: integer
resource:
nullable: true
type: string
type: object
nullable: true
type: array
resourceSelectors:
items:
properties:
apiVersion:
nullable: true
type: string
excludeKinds:
items:
nullable: true
type: string
nullable: true
type: array
excludeResourceNameRegexp:
nullable: true
type: string
kinds:
items:
nullable: true
type: string
nullable: true
type: array
kindsRegexp:
nullable: true
type: string
labelSelectors:
nullable: true
properties:
matchExpressions:
items:
properties:
key:
nullable: true
type: string
operator:
nullable: true
type: string
values:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
type: array
matchLabels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
type: object
namespaceRegexp:
nullable: true
type: string
namespaces:
items:
nullable: true
type: string
nullable: true
type: array
resourceNameRegexp:
nullable: true
type: string
resourceNames:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
required:
- apiVersion
type: array
required:
- resourceSelectors
type: object
served: true
storage: true
subresources:
status: {}
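
For illustration, a minimal ResourceSet conforming to this schema might select a handful of resources by kind and label. Note that `resourceSelectors` sits at the top level of the object (there is no `spec` wrapper) and each selector must set `apiVersion`; the names and labels below are placeholders:

apiVersion: resources.cattle.io/v1
kind: ResourceSet
metadata:
  name: example-resource-set                 # hypothetical name
resourceSelectors:
  - apiVersion: v1
    kinds:
      - ConfigMap
      - Secret
    namespaces:
      - cattle-system
    labelSelectors:
      matchLabels:
        app.kubernetes.io/part-of: example   # placeholder label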

View File

@ -0,0 +1,122 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: restores.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Restore
plural: restores
singular: restore
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.backupSource
name: Backup-Source
type: string
- jsonPath: .spec.backupFilename
name: Backup-File
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
backupFilename:
nullable: true
type: string
deleteTimeoutSeconds:
maximum: 10
type: integer
encryptionConfigSecretName:
nullable: true
type: string
ignoreErrors:
type: boolean
prune:
nullable: true
type: boolean
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- backupFilename
type: object
status:
properties:
backupSource:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
observedGeneration:
type: integer
restoreCompletionTs:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
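
Finally, a sketch of a Restore referencing a previously created backup file; the filename and S3 details below are placeholders that would normally come from the corresponding Backup's status:

apiVersion: resources.cattle.io/v1
kind: Restore
metadata:
  name: restore-example                      # hypothetical name
spec:
  backupFilename: nightly-backup-2024-08-20T00-00-00Z.tar.gz   # placeholder; required
  prune: true
  storageLocation:
    s3:
      bucketName: rancher-backups            # placeholder
      credentialSecretName: s3-creds         # placeholder
      credentialSecretNamespace: cattle-resources-system
      region: us-east-1                      # placeholder
      endpoint: s3.us-east-1.amazonaws.com   # placeholder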

Some files were not shown because too many files have changed in this diff.