[dev-v2.9] forward-port after 2.8.10 to dev 2.9 (#4736)

pull/4750/head
Nicholas 2024-11-11 19:16:51 -03:00 committed by GitHub
parent 02974b1964
commit d7456fe4c1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
149 changed files with 22233 additions and 57 deletions

Binary files not shown.

@@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/release-name: elemental-operator-crds
apiVersion: v2
appVersion: 1.6.5
description: A Helm chart for deploying Rancher Elemental Operator CRDs
name: elemental-crd
type: application
version: 103.4.1+up1.6.5

File diff suppressed because it is too large.

@@ -0,0 +1,17 @@
{{- $crds := list
"machineinventories.elemental.cattle.io"
"machineinventoryselectors.elemental.cattle.io"
"machineinventoryselectortemplates.elemental.cattle.io"
"machineregistrations.elemental.cattle.io"
"managedosimages.elemental.cattle.io"
"managedosversionchannels.elemental.cattle.io"
"managedosversions.elemental.cattle.io"
"seedimages.elemental.cattle.io"
"metadata.elemental.cattle.io"
-}}
{{- range $index, $crd := $crds -}}
{{- $obj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" $.Release.Namespace $crd -}}
{{- if and $obj $obj.metadata.deletionTimestamp -}}
{{- required "CRDs from previous installations are pending to be removed (deletionTimestamp is set). Fully deleting them before (re-)installing is required" "" -}}
{{- end -}}
{{- end -}}
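
If this guard fails an install, the leftover CRDs can be inspected directly; a quick check for one of the CRDs named above (kubectl prints a timestamp only when the CRD is stuck terminating):

```
# Empty output means the CRD is healthy; a timestamp means it is pending deletion
kubectl get crd machineinventories.elemental.cattle.io \
  -o jsonpath='{.metadata.deletionTimestamp}'
```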

@@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: elemental-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Elemental
catalog.cattle.io/kube-version: '>= 1.23.0-0'
catalog.cattle.io/namespace: cattle-elemental-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/provides-gvr: elemental.cattle.io/v1beta1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: elemental-operator
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 1.6.5
apiVersion: v2
appVersion: 1.6.5
description: Elemental provides Cloud Native OS Management for Cluster Nodes.
icon: https://raw.githubusercontent.com/rancher/elemental/main/logo/icon-elemental.svg
name: elemental
version: 103.4.1+up1.6.5

@@ -0,0 +1,5 @@
# Elemental Operator Helm Chart
This chart bootstraps an elemental-operator deployment on a [Rancher Manager](https://rancher.com/docs/rancher/) cluster using the [Helm](https://helm.sh) package manager.
Check out the [Elemental Operator Helm Chart documentation](https://elemental.docs.rancher.com/elementaloperatorchart-reference/) in the official [Elemental guide](https://elemental.docs.rancher.com/).
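
For reference, a CLI install of the CRD and operator charts looks roughly like this (a sketch assuming the OCI chart locations published on registry.suse.com; the Rancher Apps UI installs the same pair):

```
helm install --create-namespace -n cattle-elemental-system \
  elemental-operator-crds oci://registry.suse.com/rancher/elemental-operator-crds-chart
helm install --create-namespace -n cattle-elemental-system \
  elemental-operator oci://registry.suse.com/rancher/elemental-operator-chart
```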

@@ -0,0 +1,5 @@
# Elemental
Elemental brings to Rancher the ability to install and manage the OS of bare metal and virtualized machines.
For more information on how to deploy an Elemental Cluster, follow the [official documentation](https://elemental.docs.rancher.com/).

@@ -0,0 +1,27 @@
questions:
- variable: channel.defaultChannel
default: "true"
description: "Provide an Elemental OS Channel container image"
label: Elemental OS Channel
type: boolean
show_subquestion_if: true
group: "Elemental OS Channel"
subquestions:
- variable: channel.image
default: "registry.suse.com/rancher/elemental-channel/sl-micro"
description: "Specify the Elemental OS channel: for air-gapped scenarios you need to provide your own OS channel image (see https://elemental.docs.rancher.com/airgap for detailed instructions)"
type: string
label: Elemental OS Channel Image
group: "Elemental OS Channel"
- variable: channel.tag
default: "6.0-baremetal"
description: "Specify Elemental OS channel image tag"
type: string
label: "Elemental OS Channel Tag"
group: "Elemental OS Channel"
- variable: debug
default: "false"
description: "Enable debug logging in the Elemental operator"
type: boolean
label: "Enable Debug Logging"
group: "Logging"

@@ -0,0 +1,17 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- define "registry_url" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{ include "system_default_registry" . }}
{{- else if .Values.registryUrl -}}
{{- printf "%s/" .Values.registryUrl -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
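
A quick way to confirm the precedence these helpers implement (systemDefaultRegistry wins over registryUrl) is to render the chart offline; registry.example.com is a placeholder:

```
# Both renderings prefix image references; the first flag takes precedence if both are set
helm template . --set global.cattle.systemDefaultRegistry=registry.example.com | grep 'image:'
helm template . --set registryUrl=registry.example.com | grep 'image:'
```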

@@ -0,0 +1,9 @@
kind: APIService
apiVersion: management.cattle.io/v3
metadata:
name: {{ .Release.Name }}
spec:
secretName: elemental-operator
secretNamespace: {{ .Release.Namespace }}
pathPrefixes:
- /elemental/

@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elemental-capi-role
labels:
cluster.x-k8s.io/aggregate-to-manager: "true"
rules:
- apiGroups: ["elemental.cattle.io"]
resources: ["*"]
verbs: ["*"]

@@ -0,0 +1,13 @@
# Unstable channel for testing isv:Rancher:Elemental OBS projects
# it is only rendered if the registryUrl value includes a known OBS project reference
{{ if and (hasPrefix "registry.opensuse.org" .Values.registryUrl) (contains "isv/rancher/elemental" .Values.registryUrl) }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: unstable-testing-channel
namespace: fleet-default
spec:
options:
image: {{ .Values.registryUrl }}/rancher/elemental-unstable-channel:latest
type: custom
{{ end }}
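
The guard above can be exercised offline; the channel only renders when registryUrl both starts with registry.opensuse.org and references the OBS project (illustrative value):

```
# Renders the unstable-testing-channel manifest; drop the flag and it disappears
helm template . \
  --set registryUrl=registry.opensuse.org/isv/rancher/elemental/dev/charts \
  | grep -A2 unstable-testing-channel
```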

@@ -0,0 +1,30 @@
{{ $defChannelName := "" }}
{{ if and .Values.channel .Values.channel.image .Values.channel.tag .Values.channel.name }}
{{/* use "=" (not ":=") so the outer $defChannelName is updated rather than shadowed inside this if block */}}
{{ $defChannelName = .Values.channel.name }}
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ .Values.channel.name }}
namespace: fleet-default
spec:
options:
image: {{ .Values.channel.image }}:{{ .Values.channel.tag }}
type: custom
{{ end }}
# Keep pre-existing channels managed by Helm if they do not match the current default;
# this way, if an upgrade introduces a new channel, any pre-existing channel managed by Helm is not deleted
{{ range $index, $channel := (lookup "elemental.cattle.io/v1beta1" "ManagedOSVersionChannel" "fleet-default" "").items }}
{{ if and (eq (index $channel.metadata.labels "app.kubernetes.io/managed-by") "Helm") (ne $channel.metadata.name $defChannelName) }}
---
apiVersion: elemental.cattle.io/v1beta1
kind: ManagedOSVersionChannel
metadata:
name: {{ $channel.metadata.name }}
namespace: fleet-default
spec:
options:
image: {{ $channel.spec.options.image }}
type: custom
{{ end }}
{{ end }}

@@ -0,0 +1,268 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: '{{ .Release.Name }}'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
verbs:
- get
- list
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventories/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineinventoryselectors/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- machineregistrations/status
verbs:
- get
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosimages/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversionchannels/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- managedosversions/status
verbs:
- get
- list
- patch
- update
- apiGroups:
- elemental.cattle.io
resources:
- seedimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- elemental.cattle.io
resources:
- seedimages/status
verbs:
- get
- patch
- update
- apiGroups:
- fleet.cattle.io
resources:
- bundles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- management.cattle.io
resources:
- settings
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- create
- delete
- list
- watch

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{.Release.Namespace}}

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: elemental-operator
template:
metadata:
labels:
app: elemental-operator
spec:
containers:
- env:
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
{{- end }}
name: {{ .Release.Name }}
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
image: {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
args:
- operator
{{- if .Values.debug }}
- --v=5
- --debug
{{- end }}
- --namespace
- {{ .Release.Namespace }}
- --operator-image
- {{ template "registry_url" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
- --seedimage-image
- {{ template "registry_url" . }}{{ .Values.seedImage.repository }}:{{ .Values.seedImage.tag | default .Chart.AppVersion }}
- --seedimage-image-pullpolicy
- {{ .Values.seedImage.imagePullPolicy}}
serviceAccountName: {{ .Release.Name }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,16 @@
apiVersion: management.cattle.io/v3
builtin: false
description: "Elemental Administrator Role"
displayName: Elemental Administrator
kind: GlobalRole
metadata:
labels:
cattle.io/creator: norman
name: {{ .Release.Name }}
rules:
- apiGroups:
- elemental.cattle.io
resources:
- '*'
verbs:
- '*'

@@ -0,0 +1,10 @@
apiVersion: elemental.cattle.io/v1beta1
kind: Metadata
metadata:
name: {{ .Release.Name }}
spec:
appVersion: {{ .Chart.AppVersion }}
annotations:
{{- range $key, $value := .Chart.Annotations }}
{{ $key }}: {{ toYaml $value }}
{{- end }}

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}

@@ -0,0 +1,26 @@
{{ if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 }}
{{ $apis := dict
"elemental.cattle.io/v1beta1/MachineInventory" "machineinventories"
"elemental.cattle.io/v1beta1/MachineInventorySelector" "machineinventoryselectors"
"elemental.cattle.io/v1beta1/MachineInventorySelectorTemplate" "machineinventoryselectortemplates"
"elemental.cattle.io/v1beta1/MachineRegistration" "machineregistrations"
"elemental.cattle.io/v1beta1/ManagedOSImage" "managedosimages"
"elemental.cattle.io/v1beta1/ManagedOSVersionChannel" "managedosversionchannels"
"elemental.cattle.io/v1beta1/ManagedOSVersion" "managedosversions"
"elemental.cattle.io/v1beta1/SeedImage" "seedimages"
"elemental.cattle.io/v1beta1/Metadata" "metadata"
}}
{{- range $api, $crd := $apis -}}
{{- if not ($.Capabilities.APIVersions.Has $api) -}}
{{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
{{- end -}}
{{- $crdobj := lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" (print $crd ".elemental.cattle.io") -}}
{{- if not $crdobj -}}
{{- print "Cannot lookup " $crd ".elemental.cattle.io crd object" | fail -}}
{{- end -}}
{{- $crdrelease := index $crdobj.metadata.annotations "meta.helm.sh/release-name" -}}
{{- if eq $crdrelease $.Release.Name -}}
{{- required "Elemental CRDs should be moved to the new elemental-operator-crds chart before upgrading this operator." "" -}}
{{- end -}}
{{- end -}}
{{- end -}}
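
When the last guard above trips, the Helm ownership of a CRD can be confirmed from its annotations (illustrative, for a single CRD):

```
# Prints the Helm release that owns the CRD (expected: the elemental-crd release)
kubectl get crd machineinventories.elemental.cattle.io \
  -o jsonpath='{.metadata.annotations.meta\.helm\.sh/release-name}'
```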

@@ -0,0 +1,43 @@
image:
empty: rancher/pause:3.1
repository: "rancher/mirrored-elemental-operator"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
seedImage:
repository: "rancher/mirrored-elemental-seedimage-builder"
tag: "1.6.5"
imagePullPolicy: IfNotPresent
channel:
name: "sl-micro-6.0-baremetal-channel"
image: "registry.suse.com/rancher/elemental-channel/sl-micro"
tag: "6.0-baremetal"
# number of operator replicas to deploy
replicas: 1
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
# used only if systemDefaultRegistry is empty
registryUrl: ""
# enable debug output for operator
debug: false
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule

@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.9.11
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 103.1.10+up0.9.11

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
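
For the standalone case, agent-initiated registration boils down to installing this chart with the upstream cluster's coordinates (a sketch; the URL and cluster namespace are placeholders, and the token comes from the upstream cluster as described in the linked docs):

```
helm -n cattle-fleet-system install --create-namespace --wait fleet-agent fleet/fleet-agent \
  --set apiServerURL="https://fleet-manager.example.com:6443" \
  --set clusterNamespace="clusters" \
  --set token="$CLUSTER_REGISTRATION_TOKEN"
```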

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes, so add the Linux
tolerations below to workloads that should be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,13 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}",
"agentTLSMode": "{{.Values.agentTLSMode}}"
}

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-agent
spec:
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
serviceAccountName: fleet-agent
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,28 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed with the release name %s" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@@ -0,0 +1,67 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.9.11
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded value of the CA of the Kubernetes API server running the Fleet Manager.
# If left empty, it is assumed the Kubernetes API server's TLS certificate is signed by a well-known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# The cluster registration token
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the cluster registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.21.5
debug: false
debugLevel: 0

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.9.11
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 103.1.10+up0.9.11

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
Fleet Manager CustomResourceDefinitions Helm chart is a requirement for the Fleet Helm Chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1 @@
# This file is intentionally empty

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.9.11
dependencies:
- condition: gitops.enabled
name: gitjob
repository: file://./charts/gitjob
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 103.1.10+up0.9.11

@@ -0,0 +1,30 @@
# Fleet Helm Chart
Fleet is GitOps at scale. Fleet is designed to manage multiple clusters.
## What is Fleet?
* Cluster engine: Fleet is a container management and deployment engine designed to offer users more control over the local cluster and constant monitoring through GitOps. Fleet focuses not only on the ability to scale, but it also gives users a high degree of control and visibility to monitor exactly what is installed on the cluster.
* Deployment management: Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy all resources in the cluster. As a result, users can enjoy a high degree of control, consistency, and auditability of their clusters.
## Introduction
This chart deploys Fleet on a Kubernetes cluster. It also deploys some of its dependencies as subcharts.
The documentation is centralized in the [doc website](https://fleet.rancher.io/).
## Prerequisites
Get helm if you don't have it. Helm 3 is just a CLI.
## Install Fleet
Install the Fleet Helm charts (there are two because the CRDs are separated out for flexibility):
```
$ helm repo add fleet https://rancher.github.io/fleet-helm-charts/
$ helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
$ helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```
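
Once the controller is up, deployments are driven by GitRepo resources; a minimal example pointing at the public fleet-examples repository (the fleet-local namespace targets the local cluster):

```
kubectl apply -f - <<'EOF'
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: sample
  namespace: fleet-local
spec:
  repo: https://github.com/rancher/fleet-examples
  paths:
  - simple
EOF
```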

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,5 @@
apiVersion: v2
appVersion: 0.9.17
description: Controller that runs jobs based on git events
name: gitjob
version: 0.9.17

@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gitjob
rules:
- apiGroups:
- "batch"
resources:
- 'jobs'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'pods'
verbs:
- 'list'
- 'get'
- 'watch'
- apiGroups:
- ""
resources:
- 'secrets'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'configmaps'
verbs:
- '*'
- apiGroups:
- "gitjob.cattle.io"
resources:
- "gitjobs"
- "gitjobs/status"
verbs:
- "*"

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitjob-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
namespace: {{ .Release.Namespace }}

@@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitjob
spec:
selector:
matchLabels:
app: "gitjob"
template:
metadata:
labels:
app: "gitjob"
spec:
serviceAccountName: gitjob
containers:
- image: "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
name: gitjob
args:
- gitjob
- --gitjob-image
- "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
{{- if .Values.debug }}
- --debug
{{- end }}
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}

@@ -0,0 +1,23 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitjob
rules:
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitjob
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: gitjob
spec:
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: 8080
selector:
app: "gitjob"

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitjob

@@ -0,0 +1,27 @@
gitjob:
repository: rancher/gitjob
tag: v0.9.17
global:
cattle:
systemDefaultRegistry: ""
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
# PriorityClassName assigned to deployment.
priorityClassName: ""
debug: false

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes, so add the Linux
tolerations below to workloads that should be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fleet-controller
data:
config: |
{
"systemDefaultRegistry": "{{ template "system_default_registry" . }}",
"agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
"agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
"apiServerURL": "{{.Values.apiServerURL}}",
"apiServerCA": "{{b64enc .Values.apiServerCA}}",
"agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
"agentTLSMode": "{{.Values.agentTLSMode}}",
"ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
"bootstrap": {
"paths": "{{.Values.bootstrap.paths}}",
"repo": "{{.Values.bootstrap.repo}}",
"secret": "{{.Values.bootstrap.secret}}",
"branch": "{{.Values.bootstrap.branch}}",
"namespace": "{{.Values.bootstrap.namespace}}",
"agentNamespace": "{{.Values.bootstrap.agentNamespace}}",
},
"webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
"githubURLPrefix": "{{.Values.githubURLPrefix}}"
}

@@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-controller
spec:
selector:
matchLabels:
app: fleet-controller
template:
metadata:
labels:
app: fleet-controller
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS
value: {{ quote .Values.propagateDebugSettingsToAgents }}
{{- if .Values.clusterEnqueueDelay }}
- name: FLEET_CLUSTER_ENQUEUE_DELAY
value: {{ .Values.clusterEnqueueDelay }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_DIR
value: /tmp/pprof/
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_PERIOD
value: {{ quote .Values.cpuPprof.period }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
name: fleet-controller
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
command:
- fleetcontroller
{{- if not .Values.gitops.enabled }}
- --disable-gitops
{{- end }}
{{- if not .Values.bootstrap.enabled }}
- --disable-bootstrap
{{- end }}
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
{{- if .Values.cpuPprof }}
- mountPath: /tmp/pprof
name: pprof
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- if .Values.cpuPprof }}
- name: pprof {{ toYaml .Values.cpuPprof.volumeConfiguration | nindent 10 }}
{{- end }}
serviceAccountName: fleet-controller
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,40 @@
{{- if .Values.migrations.clusterRegistrationCleanup }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: fleet-cleanup-clusterregistrations
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: fleet-controller
restartPolicy: Never
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
privileged: false
command:
- fleet
args:
- cleanup
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
backoffLimit: 1
{{- end }}

@@ -0,0 +1,114 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller
rules:
- apiGroups:
- gitjob.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- fleet.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: fleet-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller-bootstrap
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
name: fleet-controller-bootstrap
namespace: {{.Release.Namespace}}
{{- end }}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller-bootstrap
{{- end }}

@@ -0,0 +1,87 @@
image:
repository: rancher/fleet
tag: v0.9.11
imagePullPolicy: IfNotPresent
agentImage:
repository: rancher/fleet-agent
tag: v0.9.11
imagePullPolicy: IfNotPresent
# For cluster registration the public URL of the Kubernetes API server must be set here
# Example: https://example.com:6443
apiServerURL: ""
# For cluster registration the pem encoded value of the CA of the Kubernetes API server must be set here
# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"
# Whether to allow clusters to specify their labels upon registration.
ignoreClusterRegistrationLabels: false
# Counts from gitrepo can get out of sync with bundleDeployment state.
# Retry after a number of seconds, as there is no great way to trigger an event that doesn't cause a loop.
# If not set, the default is 15 seconds.
# clusterEnqueueDelay: 120s
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
bootstrap:
enabled: true
# The namespace that will be autocreated and the local cluster will be registered in
namespace: fleet-local
# The namespace where the fleet agent for the local cluster will run; if empty
# this defaults to cattle-fleet-system
agentNamespace: ""
# A repo to add at install time that will deploy to the local cluster. This allows
# one to fully bootstrap fleet, its configuration and all its downstream clusters
# in one shot.
repo: ""
secret: ""
branch: master
paths: ""
global:
cattle:
systemDefaultRegistry: ""
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
gitops:
enabled: true
debug: false
debugLevel: 0
propagateDebugSettingsToAgents: true
## Optional CPU pprof configuration. Profiles are collected continuously and saved every period
## Any valid volume configuration can be provided, the example below uses hostPath
#cpuPprof:
# period: "60s"
# volumeConfiguration:
# hostPath:
# path: /tmp/pprof
# type: DirectoryOrCreate
migrations:
clusterRegistrationCleanup: true

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/release-name: rancher-cis-benchmark-crd
apiVersion: v1
description: Installs the CRDs for rancher-cis-benchmark.
name: rancher-cis-benchmark-crd
type: application
version: 5.6.0

@@ -0,0 +1,2 @@
# rancher-cis-benchmark-crd
A Rancher chart that installs the CRDs used by rancher-cis-benchmark.

@@ -0,0 +1,148 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscans.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScan
plural: clusterscans
scope: Cluster
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- jsonPath: .status.lastRunScanProfileName
name: ClusterScanProfile
type: string
- jsonPath: .status.summary.total
name: Total
type: string
- jsonPath: .status.summary.pass
name: Pass
type: string
- jsonPath: .status.summary.fail
name: Fail
type: string
- jsonPath: .status.summary.skip
name: Skip
type: string
- jsonPath: .status.summary.warn
name: Warn
type: string
- jsonPath: .status.summary.notApplicable
name: Not Applicable
type: string
- jsonPath: .status.lastRunTimestamp
name: LastRunTimestamp
type: string
- jsonPath: .spec.scheduledScanConfig.cronSchedule
name: CronSchedule
type: string
subresources:
status: {}
schema:
openAPIV3Schema:
properties:
spec:
properties:
scanProfileName:
nullable: true
type: string
scheduledScanConfig:
nullable: true
properties:
cronSchedule:
nullable: true
type: string
retentionCount:
type: integer
scanAlertRule:
nullable: true
properties:
alertOnComplete:
type: boolean
alertOnFailure:
type: boolean
type: object
type: object
scoreWarning:
enum:
- pass
- fail
nullable: true
type: string
type: object
status:
properties:
NextScanAt:
nullable: true
type: string
ScanAlertingRuleName:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
display:
nullable: true
properties:
error:
type: boolean
message:
nullable: true
type: string
state:
nullable: true
type: string
transitioning:
type: boolean
type: object
lastRunScanProfileName:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
observedGeneration:
type: integer
summary:
nullable: true
properties:
fail:
type: integer
notApplicable:
type: integer
pass:
type: integer
skip:
type: integer
total:
type: integer
warn:
type: integer
type: object
type: object
type: object

@@ -0,0 +1,54 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanbenchmarks.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanBenchmark
plural: clusterscanbenchmarks
scope: Cluster
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- jsonPath: .spec.clusterProvider
name: ClusterProvider
type: string
- jsonPath: .spec.minKubernetesVersion
name: MinKubernetesVersion
type: string
- jsonPath: .spec.maxKubernetesVersion
name: MaxKubernetesVersion
type: string
- jsonPath: .spec.customBenchmarkConfigMapName
name: customBenchmarkConfigMapName
type: string
- jsonPath: .spec.customBenchmarkConfigMapNamespace
name: customBenchmarkConfigMapNamespace
type: string
subresources:
status: {}
schema:
openAPIV3Schema:
properties:
spec:
properties:
clusterProvider:
nullable: true
type: string
customBenchmarkConfigMapName:
nullable: true
type: string
customBenchmarkConfigMapNamespace:
nullable: true
type: string
maxKubernetesVersion:
nullable: true
type: string
minKubernetesVersion:
nullable: true
type: string
type: object
type: object

@@ -0,0 +1,36 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanprofiles.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanProfile
plural: clusterscanprofiles
scope: Cluster
versions:
- name: v1
served: true
storage: true
subresources:
status: {}
schema:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
skipTests:
items:
nullable: true
type: string
nullable: true
type: array
type: object
type: object
additionalPrinterColumns:
- jsonPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string

@@ -0,0 +1,39 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanreports.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanReport
plural: clusterscanreports
scope: Cluster
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- jsonPath: .spec.lastRunTimestamp
name: LastRunTimestamp
type: string
- jsonPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
subresources:
status: {}
schema:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
reportJSON:
nullable: true
type: string
type: object
type: object

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: rancher-cis-benchmark-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: CIS Benchmark
catalog.cattle.io/kube-version: '>= 1.25.0-0 < 1.29.0-0'
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-cis-benchmark
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-cis-benchmark
apiVersion: v1
appVersion: v5.6.0
description: The cis-operator enables running CIS benchmark security scans on a kubernetes
cluster
icon: https://charts.rancher.io/assets/logos/cis-kube-bench.svg
keywords:
- security
name: rancher-cis-benchmark
version: 5.6.0

@@ -0,0 +1,9 @@
# Rancher CIS Benchmark Chart
The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generating compliance reports that can be downloaded.
# Installation
```
helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system
```

@@ -0,0 +1,55 @@
# Rancher CIS Benchmarks
This chart enables security scanning of the cluster using [CIS (Center for Internet Security) benchmarks](https://www.cisecurity.org/benchmark/kubernetes/).
For more information on how to use the feature, refer to our [docs](https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/cis-scan-guides).
This chart installs the following components:
- [cis-operator](https://github.com/rancher/cis-operator) - The cis-operator handles launching the [kube-bench](https://github.com/aquasecurity/kube-bench) tool that runs a suite of CIS tests on the nodes of your Kubernetes cluster. After scans finish, the cis-operator generates a compliance report that can be downloaded.
- Scans - A scan is a CRD (`ClusterScan`) that defines when to trigger CIS scans on the cluster based on the defined profile. A report is created after the scan is completed (see the example after this list).
- Profiles - A profile is a CRD (`ClusterScanProfile`) that defines the configuration for the CIS scan: which benchmark version to use and which specific tests to skip in that benchmark. This chart installs a few default `ClusterScanProfile` custom resources with no skipped tests, which can immediately be used to launch CIS scans.
- Benchmark Versions - A benchmark version is a CRD (`ClusterScanBenchmark`) that defines the CIS benchmark version to run using kube-bench as well as the valid configuration parameters for that benchmark. This chart installs a few default `ClusterScanBenchmark` custom resources.
- Alerting Resources - Rancher's CIS Benchmark application lets you run a cluster scan on a schedule, and send alerts when scans finish.
- If you want to enable alerts to be delivered when a cluster scan completes, you need to ensure that [Rancher's Monitoring and Alerting](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/) application is pre-installed and the [Receivers and Routes](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/#alertmanager-config) are configured to send out alerts.
- Additionally, you need to set `alerts: true` in the Values YAML while installing or upgrading this chart.
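
A minimal on-demand scan using one of the default profiles shipped with this chart (the profile name matches the `default` entry in the default-clusterscanprofiles ConfigMap):

```
kubectl apply -f - <<'EOF'
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: example-scan
spec:
  scanProfileName: cis-1.8-profile
EOF
```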
## CIS Kubernetes Benchmark support
| Source | Kubernetes distribution | scan profile | Kubernetes versions |
|--------|-------------------------|--------------------------------------------------------------------------------------------------------------------|---------------------|
| CIS | any | [cis-1.7](https://github.com/rancher/security-scan/tree/master/package/cfg/cis-1.7) | v1.25 |
| CIS | any | [cis-1.8](https://github.com/rancher/security-scan/tree/master/package/cfg/cis-1.8) | v1.26+ |
| CIS | rke | [rke-cis-1.7-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/rke-cis-1.7-permissive) | rke1-v1.25 |
| CIS | rke | [rke-cis-1.7-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/rke-cis-1.7-hardened) | rke1-v1.25 |
| CIS | rke | [rke-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/rke-cis-1.8-permissive) | rke1-v1.26+ |
| CIS | rke | [rke-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/rke-cis-1.8-hardened) | rke1-v1.26+ |
| CIS | rke2 | [rke2-cis-1.7-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/rke2-cis-1.7-permissive)| rke2-v1.25 |
| CIS | rke2 | [rke2-cis-1.7-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/rke2-cis-1.7-hardened) | rke2-v1.25 |
| CIS | rke2 | [rke2-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/rke2-cis-1.8-permissive)| rke2-v1.26+ |
| CIS | rke2 | [rke2-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/rke2-cis-1.8-hardened) | rke2-v1.26+ |
| CIS | k3s | [k3s-cis-1.7-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/k3s-cis-1.7-permissive) | k3s-v1.25 |
| CIS | k3s | [k3s-cis-1.7-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/k3s-cis-1.7-hardened) | k3s-v1.25 |
| CIS | k3s | [k3s-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/master/package/cfg/k3s-cis-1.8-permissive) | k3s-v1.26+ |
| CIS | k3s | [k3s-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/master/package/cfg/k3s-cis-1.8-hardened) | k3s-v1.26+ |
| CIS | eks | eks-1.2.0 | eks |
| CIS | aks | aks-1.0 | aks |
| CIS | gke | gke-1.2.0 | gke |
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
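
The in-place upgrade described above is a single flag; using the release name and namespace from the install example in this chart:

```
# Removes any PSP resources deployed on this chart's behalf
helm upgrade rancher-cis-benchmark ./ -n cis-operator-system \
  --set global.cattle.psp.enabled=false
```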

@@ -0,0 +1,27 @@
{{/* Ensure namespace is set the same everywhere */}}
{{- define "cis.namespace" -}}
{{- .Release.Namespace | default "cis-operator-system" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes, so add the Linux
tolerations below to workloads that should be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,14 @@
{{- if .Values.alerts.enabled -}}
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: rancher-cis-pod-monitor
namespace: {{ template "cis.namespace" . }}
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
podMetricsEndpoints:
- port: cismetrics
{{- end }}

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: aks-1.0
spec:
clusterProvider: aks
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.7
spec:
clusterProvider: ""
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.8
spec:
clusterProvider: ""
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: eks-1.2.0
spec:
clusterProvider: eks
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: gke-1.2.0
spec:
clusterProvider: gke
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.7-hardened
spec:
clusterProvider: k3s
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.7-permissive
spec:
clusterProvider: k3s
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.8-hardened
spec:
clusterProvider: k3s
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.8-permissive
spec:
clusterProvider: k3s
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.7-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.7-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.8-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.8-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.7-hardened
spec:
clusterProvider: rke2
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.7-permissive
spec:
clusterProvider: rke2
minKubernetesVersion: "1.25.0"
maxKubernetesVersion: "1.25.x"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.8-hardened
spec:
clusterProvider: rke2
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.8-permissive
spec:
clusterProvider: rke2
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,49 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-admin
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["create", "update", "delete", "patch","get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-view
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs: ["get", "watch", "list"]

@@ -0,0 +1,18 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: default-clusterscanprofiles
namespace: {{ template "cis.namespace" . }}
data:
# Default ClusterScanProfiles per cluster provider type
rke: |-
<1.21.0: rke-profile-permissive-1.20
>=1.21.0: rke-profile-permissive-1.8
rke2: |-
<1.21.0: rke2-cis-1.20-profile-permissive
>=1.21.0: rke2-cis-1.8-profile-permissive
eks: "eks-profile"
gke: "gke-profile"
aks: "aks-profile"
k3s: "k3s-cis-1.8-profile-permissive"
default: "cis-1.8-profile"

@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: cis-operator
namespace: {{ template "cis.namespace" . }}
labels:
cis.cattle.io/operator: cis-operator
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
template:
metadata:
labels:
cis.cattle.io/operator: cis-operator
spec:
serviceAccountName: cis-operator-serviceaccount
containers:
- name: cis-operator
image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}'
imagePullPolicy: IfNotPresent
ports:
- name: cismetrics
containerPort: {{ .Values.alerts.metricsPort }}
env:
- name: SECURITY_SCAN_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }}
- name: SECURITY_SCAN_IMAGE_TAG
value: {{ .Values.image.securityScan.tag }}
- name: SONOBUOY_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.sonobuoy.repository }}
- name: SONOBUOY_IMAGE_TAG
value: {{ .Values.image.sonobuoy.tag }}
- name: CIS_ALERTS_METRICS_PORT
value: '{{ .Values.alerts.metricsPort }}'
- name: CIS_ALERTS_SEVERITY
value: {{ .Values.alerts.severity }}
- name: CIS_ALERTS_ENABLED
value: {{ .Values.alerts.enabled | default "false" | quote }}
- name: CLUSTER_NAME
value: '{{ .Values.global.cattle.clusterName }}'
- name: CIS_OPERATOR_DEBUG
value: '{{ .Values.image.cisoperator.debug }}'
{{- if .Values.securityScanJob.overrideTolerations }}
- name: SECURITY_SCAN_JOB_TOLERATIONS
value: '{{ .Values.securityScanJob.tolerations | toJson }}'
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ template "cis.namespace" . }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,29 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: cis-operator-serviceaccount
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ template "cis.namespace" . }}]
backoffLimit: 1

@@ -0,0 +1,59 @@
{{- if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: cis-psp
spec:
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
fsGroup:
rule: RunAsAny
hostIPC: true
hostNetwork: true
hostPID: true
hostPorts:
- max: 65535
min: 0
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cis-psp-role
namespace: {{ template "cis.namespace" . }}
rules:
- apiGroups:
- policy
resourceNames:
- cis-psp
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cis-psp-rolebinding
namespace: {{ template "cis.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cis-psp-role
subjects:
- kind: ServiceAccount
name: cis-serviceaccount
namespace: {{ template "cis.namespace" . }}
- kind: ServiceAccount
name: cis-operator-serviceaccount
namespace: {{ template "cis.namespace" . }}
{{- end }}

Some files were not shown because too many files have changed in this diff.