[dev-v2.10] Forward-port after 2.9.4 to dev 2.10 (#4739)

pull/4744/head
Nicholas (openSUSE Software Engineer), 2024-11-11 21:15:44 -03:00, committed by GitHub
parent aadede2b8e
commit 17577c64c3
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
381 changed files with 45357 additions and 67 deletions

@@ -1,4 +1,4 @@
# Generate-Regsync-Config action will run for every PR into release-v2.9 branch only after an approval is given
# Generate-Regsync-Config action will run for every PR into release-v2.10 branch only after an approval is given
# It will run a make target to generate the regsync file and add a commit to the PR updating the regsync file.
# It will then install and run the regsync client and perform the prime image mirroring.

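For context, the regsync file generated and committed by this action follows the regclient/regsync format; a minimal sketch, with placeholder registry, repository, and credential values, might look like:
version: 1
creds:
  - registry: registry.example.com          # placeholder prime registry
    user: "<CI mirroring user>"
    pass: "<CI mirroring token>"
sync:
  - source: docker.io/rancher/rancher-agent  # placeholder source repository
    target: registry.example.com/rancher/rancher-agent
    type: repository
    tags:
      allow:
        - "v2.10.*"
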
Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/release-name: elemental-operator-crds
apiVersion: v2
appVersion: 1.6.5
description: A Helm chart for deploying Rancher Elemental Operator CRDs
name: elemental-crd
type: application
version: 103.4.1+up1.6.5

File diff suppressed because it is too large.

@@ -0,0 +1,17 @@
{{- $crds := list
"machineinventories.elemental.cattle.io"
"machineinventoryselectors.elemental.cattle.io"
"machineinventoryselectortemplates.elemental.cattle.io"
"machineregistrations.elemental.cattle.io"
"managedosimages.elemental.cattle.io"
"managedosversionchannels.elemental.cattle.io"
"managedosversions.elemental.cattle.io"
"seedimages.elemental.cattle.io"
"metadata.elemental.cattle.io"
-}}
{{- range $index, $crd := $crds -}}
{{- $obj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" $.Release.Namespace $crd -}}
{{- if and $obj $obj.metadata.deletionTimestamp -}}
{{- required "CRDs from previous installations are pending to be removed (deletionTimestamp is set). Fully deleting them before (re-)installing is required" "" -}}
{{- end -}}
{{- end -}}

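The check above only trips when a CRD from a previous installation is still terminating. As an illustration (not part of the chart), such an object returned by the lookup would carry a deletion timestamp and, typically, a pending finalizer:
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: machineinventories.elemental.cattle.io
  deletionTimestamp: "2024-11-11T00:00:00Z"       # set once deletion has been requested
  finalizers:
    - customresourcecleanup.apiextensions.k8s.io  # blocks removal until leftover CRs are cleaned up
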
@@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/release-name: elemental-operator-crds
apiVersion: v2
appVersion: 1.6.5
description: A Helm chart for deploying Rancher Elemental Operator CRDs
name: elemental-crd
type: application
version: 104.2.1+up1.6.5

File diff suppressed because it is too large.

@@ -0,0 +1,17 @@
{{- $crds := list
"machineinventories.elemental.cattle.io"
"machineinventoryselectors.elemental.cattle.io"
"machineinventoryselectortemplates.elemental.cattle.io"
"machineregistrations.elemental.cattle.io"
"managedosimages.elemental.cattle.io"
"managedosversionchannels.elemental.cattle.io"
"managedosversions.elemental.cattle.io"
"seedimages.elemental.cattle.io"
"metadata.elemental.cattle.io"
-}}
{{- range $index, $crd := $crds -}}
{{- $obj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" $.Release.Namespace $crd -}}
{{- if and $obj $obj.metadata.deletionTimestamp -}}
{{- required "CRDs from previous installations are pending to be removed (deletionTimestamp is set). Fully deleting them before (re-)installing is required" "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: elemental-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Elemental
catalog.cattle.io/kube-version: '>= 1.23.0-0'
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/provides-gvr: elemental.cattle.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: elemental-operator
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.6.5
apiVersion: v2
appVersion: 1.6.5
description: Elemental provides Cloud Native OS Management for Cluster Nodes.
icon: https://raw.githubusercontent.com/rancher/elemental/main/logo/icon-elemental.svg
name: elemental
version: 103.4.1+up1.6.5

@@ -0,0 +1,5 @@
# Elemental Operator Helm Chart
This chart bootstraps an elemental-operator deployment on a [Rancher Manager](https://rancher.com/docs/rancher/) cluster using the [Helm](https://helm.sh) package manager.
Check out the [Elemental Operator Helm Chart documentation](https://elemental.docs.rancher.com/elementaloperatorchart-reference/) in the official [Elemental guide](https://elemental.docs.rancher.com/).

@@ -0,0 +1,5 @@
# Elemental
Elemental brings to Rancher the ability to install and manage the OS of bare metal and virtualized machines.
For more information on how to deploy an Elemental Cluster, follow the [official documentation](https://elemental.docs.rancher.com/).

@@ -0,0 +1,27 @@
questions:
- variable: channel.defaultChannel
default: "true"
description: "Provide an Elemental OS Channel container image"
label: Elemental OS Channel
type: boolean
show_subquestion_if: true
group: "Elemental OS Channel"
subquestions:
- variable: channel.image
default: "registry.suse.com/rancher/elemental-channel/sl-micro"
description: "Specify the Elemental OS channel: for air-gapped scenarios you need to provide your own OS channel image (see https://elemental.docs.rancher.com/airgap for detailed instructions)"
type: string
label: Elemental OS Channel Image
group: "Elemental OS Channel"
- variable: channel.tag
default: "6.0-baremetal"
description: "Specify Elemental OS channel image tag"
type: string
label: "Elemental OS Channel Tag"
group: "Elemental OS Channel"
- variable: debug
default: "false"
description: "Enable debug logging in the Elemental operator"
type: boolean
label: "Enable Debug Logging"
group: "Logging"

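These questions map onto the chart values; for an air-gapped install the answers would land in values roughly like this (the registry host is a placeholder, and channel.defaultChannel is the UI toggle defined above):
channel:
  defaultChannel: true
  image: registry.example.com/rancher/elemental-channel/sl-micro   # your mirrored OS channel image
  tag: 6.0-baremetal
debug: false
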
@@ -0,0 +1,17 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{ include "system_default_registry" . }}
{{- else if .Values.registryUrl -}}
{{- printf "%s/" .Values.registryUrl -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

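One note on the helper above: global.cattle.systemDefaultRegistry takes precedence over registryUrl. With values such as these (hypothetical registries), the operator image renders as registry.example.com/rancher/mirrored-elemental-operator:1.6.5 and registryUrl is ignored:
global:
  cattle:
    systemDefaultRegistry: "registry.example.com"   # wins when non-empty
registryUrl: "registry.example.org/mirror"          # used only if the field above is empty
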
@@ -0,0 +1,9 @@
kind: APIService
apiVersion: management.cattle.io/v3
metadata:
name: {{ .Release.Name }}
spec:
secretName: elemental-operator
secretNamespace: {{ .Release.Namespace }}
pathPrefixes:
- /elemental/

@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elemental-capi-role
labels:
cluster.x-k8s.io/aggregate-to-manager: "true"
rules:
- apiGroups: ["elemental.cattle.io"]
resources: ["*"]
verbs: ["*"]

@@ -0,0 +1,13 @@
# Unstable channel for testing isv:Rancher:Elemental OBS projects
# it is only rendered if the registryUrl value includes a known OBS project reference
{{ if and (hasPrefix "registry.opensuse.org" .Values.registryUrl) (contains "isv/rancher/elemental" .Values.registryUrl) }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: unstable-testing-channel
namespace: fleet-default
spec:
options:
image: {{ .Values.registryUrl }}/rancher/elemental-unstable-channel:latest
type: custom
{{ end }}

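The channel above is gated on registryUrl; a hypothetical value satisfying both conditions (the registry.opensuse.org prefix plus an isv/rancher/elemental project reference) would be:
registryUrl: "registry.opensuse.org/isv/rancher/elemental/dev"   # hypothetical OBS project reference
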
@@ -0,0 +1,30 @@
{{ $defChannelName := "" }}
{{ if and .Values.channel .Values.channel.image .Values.channel.tag .Values.channel.name }}
{{ $defChannelName := .Values.channel.name }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ .Values.channel.name }}
namespace: fleet-default
spec:
options:
image: {{ .Values.channel.image }}:{{ .Values.channel.tag }}
type: custom
{{ end }}
# Keep pre-existing channels managed by Helm if they do not match the current default.
# This way, if an upgrade introduces a new channel, any pre-existing channel managed by Helm is not deleted.
{{ range $index, $channel := (lookup "elemental.cattle.io/v1beta1" "ManagedOSVersionChannel" "fleet-default" "").items }}
{{ if and (eq (index $channel.metadata.labels "app.kubernetes.io/managed-by") "Helm") (ne $channel.metadata.name $defChannelName) }}
---
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ $channel.metadata.name }}
namespace: fleet-default
spec:
options:
image: {{ $channel.spec.options.image }}
type: custom
{{ end }}
{{ end }}

@@ -0,0 +1,268 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: '{{ .Release.Name }}'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
verbs:
- get
- list
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- seedimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- seedimages/status
verbs:
- get
- patch
- update
- apiGroups:
- fleet.cattle.io
resources:
- bundles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- management.cattle.io
resources:
- settings
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- create
- delete
- list
- watch

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{.Release.Namespace}}

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: elemental-operator
template:
metadata:
labels:
app: elemental-operator
spec:
containers:
- env:
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
{{- end }}
name: {{ .Release.Name }}
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
image: {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
args:
- operator
{{- if .Values.debug }}
- --v=5
- --debug
{{- end }}
- --namespace
- {{ .Release.Namespace }}
- --operator-image
- {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
- --seedimage-image
- {{ template "registry_url" . }}{{ .Values.seedImage.repository }}:{{ .Values.seedImage.tag | default .Chart.AppVersion }}
- --seedimage-image-pullpolicy
- {{ .Values.seedImage.imagePullPolicy}}
serviceAccountName: {{ .Release.Name }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,16 @@
apiVersion: management.cattle.io/v3
builtin: false
description: "Elemental Administrator Role"
displayName: Elemental Administrator
kind: GlobalRole
metadata:
labels:
cattle.io/creator: norman
name: {{ .Release.Name }}
rules:
- apiGroups:
- elemental.cattle.io
resources:
- '*'
verbs:
- '*'

@@ -0,0 +1,10 @@
apiVersion: elemental.cattle.io/v1beta1
kind: Metadata
metadata:
name: {{ .Release.Name }}
spec:
appVersion: {{ .Chart.AppVersion }}
annotations:
{{- range $key, $value := .Chart.Annotations }}
{{ $key }}: {{ toYaml $value }}
{{- end }}

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}

@@ -0,0 +1,26 @@
{{ if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 }}
{{ $apis := dict
"elemental.cattle.io/v1beta1/MachineInventory" "machineinventories"
"elemental.cattle.io/v1beta1/MachineInventorySelector" "machineinventoryselectors"
"elemental.cattle.io/v1beta1/MachineInventorySelectorTemplate" "machineinventoryselectortemplates"
"elemental.cattle.io/v1beta1/MachineRegistration" "machineregistrations"
"elemental.cattle.io/v1beta1/ManagedOSImage" "managedosimages"
"elemental.cattle.io/v1beta1/ManagedOSVersionChannel" "managedosversionchannels"
"elemental.cattle.io/v1beta1/ManagedOSVersion" "managedosversions"
"elemental.cattle.io/v1beta1/SeedImage" "seedimages"
"elemental.cattle.io/v1beta1/Metadata" "metadata"
}}
{{- range $api, $crd := $apis -}}
{{- if not ($.Capabilities.APIVersions.Has $api) -}}
{{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
{{- end -}}
{{- $crdobj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" (print $crd ".elemental.cattle.io") -}}
{{- if not $crdobj -}}
{{- print "Cannot lookup " $crd ".elemental.cattle.io crd object" | fail -}}
{{- end -}}
{{- $crdrelease := index $crdobj.metadata.annotations "meta.helm.sh/release-name" -}}
{{- if eq $crdrelease $.Release.Name -}}
{{- required "Elemental CRDs should be moved to the new elemental-operator-crds chart before upgrading this operator." "" -}}
{{- end -}}
{{- end -}}
{{- end -}}

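The upgrade guard above reads the Helm metadata carried by each CRD. Assuming the CRD chart was installed as a release named elemental-crd, the looked-up object would hold standard Helm ownership metadata along these lines:
metadata:
  annotations:
    meta.helm.sh/release-name: elemental-crd
    meta.helm.sh/release-namespace: cattle-elemental-system
  labels:
    app.kubernetes.io/managed-by: Helm
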
@@ -0,0 +1,43 @@
image:
empty: rancher/pause:3.1
repository: "rancher/mirrored-elemental-operator"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
seedImage:
repository: "rancher/mirrored-elemental-seedimage-builder"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
channel:
name: "sl-micro-6.0-baremetal-channel"
image: "registry.suse.com/rancher/elemental-channel/sl-micro"
tag: "6.0-baremetal"
# number of operator replicas to deploy
replicas: 1
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
# used only if systemDefaultRegistry is empty
registryUrl: ""
# enable debug output for operator
debug: false
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule

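To send operator traffic through a proxy, the values above would be set roughly as follows (host and credentials are placeholders, using the conventional user:password@host:port form):
proxy: "http://proxyuser:proxypass@proxy.example.com:8080"
noProxy: "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local"
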
@@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: elemental-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Elemental
catalog.cattle.io/kube-version: '>= 1.23.0-0'
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/provides-gvr: elemental.cattle.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: elemental-operator
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.6.5
apiVersion: v2
appVersion: 1.6.5
description: Elemental provides Cloud Native OS Management for Cluster Nodes.
icon: https://raw.githubusercontent.com/rancher/elemental/main/logo/icon-elemental.svg
name: elemental
version: 104.2.1+up1.6.5

@@ -0,0 +1,5 @@
# Elemental Operator Helm Chart
This chart bootstraps an elemental-operator deployment on a [Rancher Manager](https://rancher.com/docs/rancher/) cluster using the [Helm](https://helm.sh) package manager.
Check out the [Elemental Operator Helm Chart documentation](https://elemental.docs.rancher.com/elementaloperatorchart-reference/) in the official [Elemental guide](https://elemental.docs.rancher.com/).

@@ -0,0 +1,5 @@
# Elemental
Elemental brings to Rancher the ability to install and manage the OS of bare metal and virtualized machines.
For more information on how to deploy an Elemental Cluster, follow the [official documentation](https://elemental.docs.rancher.com/).

@@ -0,0 +1,27 @@
questions:
- variable: channel.defaultChannel
default: "true"
description: "Provide an Elemental OS Channel container image"
label: Elemental OS Channel
type: boolean
show_subquestion_if: true
group: "Elemental OS Channel"
subquestions:
- variable: channel.image
default: "registry.suse.com/rancher/elemental-channel/sl-micro"
description: "Specify the Elemental OS channel: for air-gapped scenarios you need to provide your own OS channel image (see https://elemental.docs.rancher.com/airgap for detailed instructions)"
type: string
label: Elemental OS Channel Image
group: "Elemental OS Channel"
- variable: channel.tag
default: "6.0-baremetal"
description: "Specify Elemental OS channel image tag"
type: string
label: "Elemental OS Channel Tag"
group: "Elemental OS Channel"
- variable: debug
default: "false"
description: "Enable debug logging in the Elemental operator"
type: boolean
label: "Enable Debug Logging"
group: "Logging"

@@ -0,0 +1,17 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{ include "system_default_registry" . }}
{{- else if .Values.registryUrl -}}
{{- printf "%s/" .Values.registryUrl -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,9 @@
kind: APIService
apiVersion: management.cattle.io/v3
metadata:
name: {{ .Release.Name }}
spec:
secretName: elemental-operator
secretNamespace: {{ .Release.Namespace }}
pathPrefixes:
- /elemental/

@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elemental-capi-role
labels:
cluster.x-k8s.io/aggregate-to-manager: "true"
rules:
- apiGroups: ["elemental.cattle.io"]
resources: ["*"]
verbs: ["*"]

@@ -0,0 +1,13 @@
# Unstable channel for testing isv:Rancher:Elemental OBS projects
# it is only rendered if the registryUrl value includes a known OBS project reference
{{ if and (hasPrefix "registry.opensuse.org" .Values.registryUrl) (contains "isv/rancher/elemental" .Values.registryUrl) }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: unstable-testing-channel
namespace: fleet-default
spec:
options:
image: {{ .Values.registryUrl }}/rancher/elemental-unstable-channel:latest
type: custom
{{ end }}

@@ -0,0 +1,30 @@
{{ $defChannelName := "" }}
{{ if and .Values.channel .Values.channel.image .Values.channel.tag .Values.channel.name }}
{{ $defChannelName := .Values.channel.name }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ .Values.channel.name }}
namespace: fleet-default
spec:
options:
image: {{ .Values.channel.image }}:{{ .Values.channel.tag }}
type: custom
{{ end }}
# Keep pre-existing channels managed by Helm if they do not match the current default.
# This way, if an upgrade introduces a new channel, any pre-existing channel managed by Helm is not deleted.
{{ range $index, $channel := (lookup "elemental.cattle.io/v1beta1" "ManagedOSVersionChannel" "fleet-default" "").items }}
{{ if and (eq (index $channel.metadata.labels "app.kubernetes.io/managed-by") "Helm") (ne $channel.metadata.name $defChannelName) }}
---
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ $channel.metadata.name }}
namespace: fleet-default
spec:
options:
image: {{ $channel.spec.options.image }}
type: custom
{{ end }}
{{ end }}

@@ -0,0 +1,268 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: '{{ .Release.Name }}'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
verbs:
- get
- list
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- seedimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- seedimages/status
verbs:
- get
- patch
- update
- apiGroups:
- fleet.cattle.io
resources:
- bundles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- management.cattle.io
resources:
- settings
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- create
- delete
- list
- watch

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{.Release.Namespace}}

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: elemental-operator
template:
metadata:
labels:
app: elemental-operator
spec:
containers:
- env:
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
{{- end }}
name: {{ .Release.Name }}
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
image: {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
args:
- operator
{{- if .Values.debug }}
- --v=5
- --debug
{{- end }}
- --namespace
- {{ .Release.Namespace }}
- --operator-image
- {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
- --seedimage-image
- {{ template "registry_url" . }}{{ .Values.seedImage.repository }}:{{ .Values.seedImage.tag | default .Chart.AppVersion }}
- --seedimage-image-pullpolicy
- {{ .Values.seedImage.imagePullPolicy}}
serviceAccountName: {{ .Release.Name }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,16 @@
apiVersion: management.cattle.io/v3
builtin: false
description: "Elemental Administrator Role"
displayName: Elemental Administrator
kind: GlobalRole
metadata:
labels:
cattle.io/creator: norman
name: {{ .Release.Name }}
rules:
- apiGroups:
- elemental.cattle.io
resources:
- '*'
verbs:
- '*'

@@ -0,0 +1,10 @@
apiVersion: elemental.cattle.io/v1beta1
kind: Metadata
metadata:
name: {{ .Release.Name }}
spec:
appVersion: {{ .Chart.AppVersion }}
annotations:
{{- range $key, $value := .Chart.Annotations }}
{{ $key }}: {{ toYaml $value }}
{{- end }}

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}

@@ -0,0 +1,26 @@
{{ if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 }}
{{ $apis := dict
"elemental.cattle.io/v1beta1/MachineInventory" "machineinventories"
"elemental.cattle.io/v1beta1/MachineInventorySelector" "machineinventoryselectors"
"elemental.cattle.io/v1beta1/MachineInventorySelectorTemplate" "machineinventoryselectortemplates"
"elemental.cattle.io/v1beta1/MachineRegistration" "machineregistrations"
"elemental.cattle.io/v1beta1/ManagedOSImage" "managedosimages"
"elemental.cattle.io/v1beta1/ManagedOSVersionChannel" "managedosversionchannels"
"elemental.cattle.io/v1beta1/ManagedOSVersion" "managedosversions"
"elemental.cattle.io/v1beta1/SeedImage" "seedimages"
"elemental.cattle.io/v1beta1/Metadata" "metadata"
}}
{{- range $api, $crd := $apis -}}
{{- if not ($.Capabilities.APIVersions.Has $api) -}}
{{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
{{- end -}}
{{- $crdobj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" (print $crd ".elemental.cattle.io") -}}
{{- if not $crdobj -}}
{{- print "Cannot lookup " $crd ".elemental.cattle.io crd object" | fail -}}
{{- end -}}
{{- $crdrelease := index $crdobj.metadata.annotations "meta.helm.sh/release-name" -}}
{{- if eq $crdrelease $.Release.Name -}}
{{- required "Elemental CRDs should be moved to the new elemental-operator-crds chart before upgrading this operator." "" -}}
{{- end -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,43 @@
image:
empty: rancher/pause:3.1
repository: "rancher/mirrored-elemental-operator"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
seedImage:
repository: "rancher/mirrored-elemental-seedimage-builder"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
channel:
name: "sl-micro-6.0-baremetal-channel"
image: "registry.suse.com/rancher/elemental-channel/sl-micro"
tag: "6.0-baremetal"
# number of operator replicas to deploy
replicas: 1
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
# used only if systemDefaultRegistry is empty
registryUrl: ""
# enable debug output for operator
debug: false
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule

@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.9.11
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 103.1.10+up0.9.11

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
the Linux tolerations below allow workloads to be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,13 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}",
"agentTLSMode": "{{.Values.agentTLSMode}}"
}

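With labels and a client ID supplied, the ConfigMap above renders the embedded JSON along these lines (values are placeholders):
config: |-
  {
    "labels":{"env":"dev"},
    "clientID":"my-downstream-cluster",
    "agentTLSMode": "system-store"
  }
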
@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-agent
spec:
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
serviceAccountName: fleet-agent
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,28 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed with the release name %s" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@@ -0,0 +1,67 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.9.11
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded CA certificate of the Kubernetes API server running the Fleet Manager.
# If left empty, it is assumed the Kubernetes API server's TLS certificate is signed by a well-known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# The cluster registration value
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the clusters registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.21.5
debug: false
debugLevel: 0

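For agent-initiated registration, the values that must be filled in are the Fleet Manager API endpoint, its CA (unless publicly trusted), and the registration token; a sketch with placeholder data:
apiServerURL: "https://rancher.example.com:6443"
apiServerCA: |-
  -----BEGIN CERTIFICATE-----
  <PEM certificate body>
  -----END CERTIFICATE-----
token: "<cluster registration token>"
clusterNamespace: "<namespace of the registered cluster>"
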
@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.10.5
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 104.1.1+up0.10.5

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
the Linux tolerations below allow workloads to be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}",
"agentTLSMode": "{{.Values.agentTLSMode}}"
{{ if .Values.garbageCollectionInterval }}
"garbageCollectionInterval": "{{.Values.garbageCollectionInterval}}"
{{ end }}
}

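Compared to the 103.x chart, this ConfigMap can also carry a garbage collection interval. It is only rendered when the corresponding top-level value is set, e.g. (the Go-style duration string is an assumption):
garbageCollectionInterval: "15m"
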
@@ -0,0 +1,108 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: fleet-agent
spec:
serviceName: fleet-agent
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
initContainers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent-register
command:
- fleetagent
- register
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /.kube
name: kube
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent-clusterstatus
command:
- fleetagent
- clusterstatus
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumes:
- name: kube
emptyDir: {}
serviceAccountName: fleet-agent
{{- if .Values.fleetAgent.hostNetwork }}
hostNetwork: true
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

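The StatefulSet above also gains an optional host-network mode, useful when Fleet has to provision CNI configuration on a cluster that does not have a CNI yet (see the values file below); enabling it is a single value:
fleetAgent:
  hostNetwork: true   # agent pods run on the node network
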
@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,28 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: fleet-agent
spec:
type: ClusterIP
clusterIP: None
selector:
app: fleet-agent

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed with the release name %s" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@@ -0,0 +1,70 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.10.5
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded CA certificate of the Kubernetes API server running the Fleet Manager.
# If left empty, it is assumed the Kubernetes API server's TLS certificate is signed by a well-known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# The cluster registration value
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
# labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the clusters registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## HostNetwork setting for the agent deployment.
## When set allows for provisioning of network related bundles (CNI configuration) in a cluster without CNI.
hostNetwork: false
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.29.0
debug: false
debugLevel: 0

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.9.11
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 103.1.10+up0.9.11

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
Fleet Manager CustomResourceDefinitions Helm chart is a requirement for the Fleet Helm Chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1 @@
# This file is intentionally empty

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.10.5
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 104.1.1+up0.10.5

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
Fleet Manager CustomResourceDefinitions Helm chart is a requirement for the Fleet Helm Chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

Some files were not shown because too many files have changed in this diff.