[dev-v2.10] forward-port after 2.9.5 (#4898)

pull/4903/head
Nicholas (openSUSE Software Engineer) 2024-12-18 23:16:17 -03:00, committed by GitHub
parent 80a3ba212f
commit 90d583e5b3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
726 changed files with 117374 additions and 16 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.10.7
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 104.1.3+up0.10.7

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
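For orientation, a minimal values override for agent-initiated registration might look like the sketch below; the URL, CA, and token are placeholders, and the keys come from this chart's `values.yaml` (shown later in this diff):
```
apiServerURL: "https://example.com:6443"   # public URL of the Fleet Manager's Kubernetes API server
apiServerCA: ""                            # PEM-encoded CA; leave empty if signed by a well-known CA
token: "<cluster registration token>"
clusterNamespace: "<cluster registration namespace>"
```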

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}
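For context, the workload templates in this chart consume these helpers via `include` piped to `nindent`; this is the same pattern used by the fleet-agent StatefulSet below:
```
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
```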

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}",
"agentTLSMode": "{{.Values.agentTLSMode}}"
{{ if .Values.garbageCollectionInterval }}
"garbageCollectionInterval": "{{.Values.garbageCollectionInterval}}"
{{ end }}
}
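For reference, with this chart's default values (no `labels`, no `garbageCollectionInterval`, empty `clientID`) the template above renders roughly to the following; a sketch, not verbatim output:
```
kind: ConfigMap
apiVersion: v1
metadata:
  name: fleet-agent
data:
  config: |-
    {
      "clientID":"",
      "agentTLSMode": "system-store"
    }
```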

@@ -0,0 +1,108 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: fleet-agent
spec:
serviceName: fleet-agent
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
initContainers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent-register
command:
- fleetagent
- register
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /.kube
name: kube
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent-clusterstatus
command:
- fleetagent
- clusterstatus
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumes:
- name: kube
emptyDir: {}
serviceAccountName: fleet-agent
{{- if .Values.fleetAgent.hostNetwork }}
hostNetwork: true
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,28 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: fleet-agent
spec:
type: ClusterIP
clusterIP: None
selector:
app: fleet-agent

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }}
{{end}}

@@ -0,0 +1,70 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.10.7
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded CA certificate of the Kubernetes API server running the Fleet Manager.
# If left empty, it is assumed the Kubernetes API server's TLS certificate is signed by a well-known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# The cluster registration token
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
# labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the clusters' registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## HostNetwork setting for the agent deployment.
## When set, allows provisioning of network-related bundles (CNI configuration) in a cluster without a CNI.
hostNetwork: false
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.29.0
debug: false
debugLevel: 0

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.10.7
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 104.1.3+up0.10.7

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
Fleet Manager CustomResourceDefinitions Helm chart is a requirement for the Fleet Helm Chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large

@@ -0,0 +1 @@
# This file is intentionally empty

@@ -0,0 +1,18 @@
annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.10.7
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 104.1.3+up0.10.7

@@ -0,0 +1,30 @@
# Fleet Helm Chart
Fleet is GitOps at scale. Fleet is designed to manage multiple clusters.
## What is Fleet?
* Cluster engine: Fleet is a container management and deployment engine designed to offer users more control over the local cluster and constant monitoring through GitOps. Fleet focuses not only on the ability to scale, but also on giving users a high degree of control and visibility to monitor exactly what is installed on the cluster.
* Deployment management: Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy all resources in the cluster. As a result, users can enjoy a high degree of control, consistency, and auditability of their clusters.
## Introduction
This chart deploys Fleet on a Kubernetes cluster. It also deploys some of its dependencies as subcharts.
The documentation is centralized in the [doc website](https://fleet.rancher.io/).
## Prerequisites
Get Helm if you don't have it. Helm 3 is just a CLI.
## Install Fleet
Install the Fleet Helm charts (there are two because we separate out the CRDs for ultimate flexibility):
```
$ helm repo add fleet https://rancher.github.io/fleet-helm-charts/
$ helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
$ helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```
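For managing downstream clusters, `apiServerURL` (and usually `apiServerCA`) must also be set. A minimal values-override sketch with placeholder values, which could be passed to the last command via `-f`:
```
apiServerURL: "https://example.com:6443"
# PEM-encoded CA of the API server; leave empty if its certificate is signed by a well-known CA
apiServerCA: ""
```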

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,29 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fleet-controller
data:
config: |
{
"systemDefaultRegistry": "{{ template "system_default_registry" . }}",
"agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
"agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
"apiServerURL": "{{.Values.apiServerURL}}",
"apiServerCA": "{{b64enc .Values.apiServerCA}}",
"agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
"agentTLSMode": "{{.Values.agentTLSMode}}",
{{ if .Values.garbageCollectionInterval }}
"garbageCollectionInterval": "{{.Values.garbageCollectionInterval}}",
{{ end }}
"ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
"bootstrap": {
"paths": "{{.Values.bootstrap.paths}}",
"repo": "{{.Values.bootstrap.repo}}",
"secret": "{{.Values.bootstrap.secret}}",
"branch": "{{.Values.bootstrap.branch}}",
"namespace": "{{.Values.bootstrap.namespace}}",
"agentNamespace": "{{.Values.bootstrap.agentNamespace}}"
},
"webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
"githubURLPrefix": "{{.Values.githubURLPrefix}}"
}

@@ -0,0 +1,251 @@
{{- $shards := list (dict "id" "" "nodeSelector" dict) -}}
{{- $uniqueShards := list -}}
{{- if .Values.shards -}}
{{- range .Values.shards -}}
{{- if not (has .id $uniqueShards) -}}
{{- $shards = append $shards . -}}
{{- $uniqueShards = append $uniqueShards .id -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{ range $shard := $shards }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: "fleet-controller{{if $shard.id }}-shard-{{ $shard.id }}{{end}}"
spec:
selector:
matchLabels:
app: fleet-controller
template:
metadata:
labels:
app: fleet-controller
fleet.cattle.io/shard-id: "{{ $shard.id }}"
{{- if empty $shard.id }}
fleet.cattle.io/shard-default: "true"
{{- end }}
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if $.Values.clusterEnqueueDelay }}
- name: FLEET_CLUSTER_ENQUEUE_DELAY
value: {{ $.Values.clusterEnqueueDelay }}
{{- end }}
{{- if $.Values.proxy }}
- name: HTTP_PROXY
value: {{ $.Values.proxy }}
- name: HTTPS_PROXY
value: {{ $.Values.proxy }}
- name: NO_PROXY
value: {{ $.Values.noProxy }}
{{- end }}
{{- if $.Values.cpuPprof }}
- name: FLEET_CPU_PPROF_DIR
value: /tmp/pprof/
{{- end }}
{{- if $.Values.cpuPprof }}
- name: FLEET_CPU_PPROF_PERIOD
value: {{ quote $.Values.cpuPprof.period }}
{{- end }}
{{- if $.Values.leaderElection.leaseDuration }}
- name: CATTLE_ELECTION_LEASE_DURATION
value: {{$.Values.leaderElection.leaseDuration}}
{{- end }}
{{- if $.Values.leaderElection.retryPeriod }}
- name: CATTLE_ELECTION_RETRY_PERIOD
value: {{$.Values.leaderElection.retryPeriod}}
{{- end }}
{{- if $.Values.leaderElection.renewDeadline }}
- name: CATTLE_ELECTION_RENEW_DEADLINE
value: {{$.Values.leaderElection.renewDeadline}}
{{- end }}
{{- if $.Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- if $.Values.controller.reconciler.workers.bundle }}
- name: BUNDLE_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.bundle }}
{{- end }}
{{- if $.Values.controller.reconciler.workers.bundledeployment }}
- name: BUNDLEDEPLOYMENT_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.bundledeployment }}
{{- end }}
{{- if $.Values.extraEnv }}
{{ toYaml $.Values.extraEnv | indent 8}}
{{- end }}
image: '{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}'
name: fleet-controller
imagePullPolicy: "{{ $.Values.image.imagePullPolicy }}"
{{- if $.Values.metrics.enabled }}
ports:
- containerPort: 8080
name: metrics
{{- end }}
command:
- fleetcontroller
{{- if $shard.id }}
- --shard-id
- {{ quote $shard.id }}
{{- end }}
{{- if not $.Values.metrics.enabled }}
- --disable-metrics
{{- end }}
{{- if $.Values.debug }}
- --debug
- --debug-level
- {{ quote $.Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
{{- if $.Values.cpuPprof }}
- mountPath: /tmp/pprof
name: pprof
{{- end }}
{{- if not $shard.id }} # Only deploy cleanup and agent management through sharding-less deployment
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if $.Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- if $.Values.leaderElection.leaseDuration }}
- name: CATTLE_ELECTION_LEASE_DURATION
value: {{$.Values.leaderElection.leaseDuration}}
{{- end }}
{{- if $.Values.leaderElection.retryPeriod }}
- name: CATTLE_ELECTION_RETRY_PERIOD
value: {{$.Values.leaderElection.retryPeriod}}
{{- end }}
{{- if $.Values.leaderElection.renewDeadline }}
- name: CATTLE_ELECTION_RENEW_DEADLINE
value: {{$.Values.leaderElection.renewDeadline}}
{{- end }}
{{- if $.Values.controller.reconciler.workers.gitrepo }}
- name: GITREPO_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.gitrepo }}
{{- end }}
{{- if $.Values.controller.reconciler.workers.bundle }}
- name: BUNDLE_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.bundle }}
{{- end }}
{{- if $.Values.controller.reconciler.workers.bundledeployment }}
- name: BUNDLEDEPLOYMENT_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.bundledeployment }}
{{- end }}
image: '{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}'
name: fleet-cleanup
imagePullPolicy: "{{ $.Values.image.imagePullPolicy }}"
command:
- fleetcontroller
- cleanup
{{- if $.Values.debug }}
- --debug
- --debug-level
- {{ quote $.Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS
value: {{ quote $.Values.propagateDebugSettingsToAgents }}
{{- if $.Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- if $.Values.leaderElection.leaseDuration }}
- name: CATTLE_ELECTION_LEASE_DURATION
value: {{$.Values.leaderElection.leaseDuration}}
{{- end }}
{{- if $.Values.leaderElection.retryPeriod }}
- name: CATTLE_ELECTION_RETRY_PERIOD
value: {{$.Values.leaderElection.retryPeriod}}
{{- end }}
{{- if $.Values.leaderElection.renewDeadline }}
- name: CATTLE_ELECTION_RENEW_DEADLINE
value: {{$.Values.leaderElection.renewDeadline}}
{{- end }}
image: '{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}'
name: fleet-agentmanagement
imagePullPolicy: "{{ $.Values.image.imagePullPolicy }}"
command:
- fleetcontroller
- agentmanagement
{{- if not $.Values.bootstrap.enabled }}
- --disable-bootstrap
{{- end }}
{{- if $.Values.debug }}
- --debug
- --debug-level
- {{ quote $.Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- if $.Values.cpuPprof }}
- name: pprof {{ toYaml $.Values.cpuPprof.volumeConfiguration | nindent 10 }}
{{- end }}
serviceAccountName: fleet-controller
nodeSelector: {{ include "linux-node-selector" $shard.id | nindent 8 }}
{{- if $.Values.nodeSelector }}
{{ toYaml $.Values.nodeSelector | indent 8 }}
{{- end }}
{{- if $shard.nodeSelector -}}
{{- range $key, $value := $shard.nodeSelector }}
{{ $key | indent 8}}: {{ $value }}
{{- end }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" $shard.id | nindent 8 }}
{{- if $.Values.tolerations }}
{{ toYaml $.Values.tolerations | indent 8 }}
{{- end }}
{{- if $.Values.priorityClassName }}
priorityClassName: "{{$.Values.priorityClassName}}"
{{- end }}
{{- if not $.Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}
---
{{- end }}

@@ -0,0 +1,136 @@
{{- $shards := list (dict "id" "" "nodeSelector" dict) -}}
{{- $uniqueShards := list -}}
{{- if .Values.shards -}}
{{- range .Values.shards -}}
{{- if not (has .id $uniqueShards) -}}
{{- $shards = append $shards . -}}
{{- $uniqueShards = append $uniqueShards .id -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{ range $shard := $shards }}
{{- if $.Values.gitops.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: "gitjob{{if $shard.id }}-shard-{{ $shard.id }}{{end}}"
spec:
selector:
matchLabels:
app: "gitjob"
template:
metadata:
labels:
app: "gitjob"
fleet.cattle.io/shard-id: "{{ $shard.id }}"
{{- if empty $shard.id }}
fleet.cattle.io/shard-default: "true"
{{- end }}
spec:
serviceAccountName: gitjob
containers:
- image: "{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}"
name: gitjob
{{- if $.Values.metrics.enabled }}
ports:
- containerPort: 8081
name: metrics
{{- end }}
args:
- fleetcontroller
- gitjob
- --gitjob-image
- "{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}"
{{- if $.Values.debug }}
- --debug
- --debug-level
- {{ quote $.Values.debugLevel }}
{{- end }}
{{- if $shard.id }}
- --shard-id
- {{ quote $shard.id }}
{{- end }}
{{- if $shard.nodeSelector }}
- --shard-node-selector
- {{ toJson $shard.nodeSelector | squote }}
{{- end }}
{{- if not $.Values.metrics.enabled }}
- --disable-metrics
{{- end }}
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if $.Values.leaderElection.leaseDuration }}
- name: CATTLE_ELECTION_LEASE_DURATION
value: {{$.Values.leaderElection.leaseDuration}}
{{- end }}
{{- if $.Values.leaderElection.retryPeriod }}
- name: CATTLE_ELECTION_RETRY_PERIOD
value: {{$.Values.leaderElection.retryPeriod}}
{{- end }}
{{- if $.Values.leaderElection.renewDeadline }}
- name: CATTLE_ELECTION_RENEW_DEADLINE
value: {{$.Values.leaderElection.renewDeadline}}
{{- end }}
{{- if $.Values.proxy }}
- name: HTTP_PROXY
value: {{ $.Values.proxy }}
- name: HTTPS_PROXY
value: {{ $.Values.proxy }}
- name: NO_PROXY
value: {{ $.Values.noProxy }}
{{- end }}
{{- if $.Values.controller.reconciler.workers.gitrepo }}
- name: GITREPO_RECONCILER_WORKERS
value: {{ quote $.Values.controller.reconciler.workers.gitrepo }}
{{- end }}
{{- if $.Values.extraEnv }}
{{ toYaml $.Values.extraEnv | indent 12}}
{{- end }}
{{- if $.Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
nodeSelector: {{ include "linux-node-selector" $shard.id | nindent 8 }}
{{- if $.Values.nodeSelector }}
{{ toYaml $.Values.nodeSelector | indent 8 }}
{{- end }}
{{- if $shard.nodeSelector -}}
{{- range $key, $value := $shard.nodeSelector }}
{{ $key | indent 8}}: {{ $value }}
{{- end }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" $shard.id | nindent 8 }}
{{- if $.Values.tolerations }}
{{ toYaml $.Values.tolerations | indent 8 }}
{{- end }}
{{- if $.Values.priorityClassName }}
priorityClassName: "{{$.Values.priorityClassName}}"
{{- end }}
{{- if not $.Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- end }}
---
{{- end }}

@@ -0,0 +1,41 @@
{{- if .Values.migrations.clusterRegistrationCleanup }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: fleet-cleanup-clusterregistrations
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: fleet-controller
restartPolicy: Never
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
privileged: false
command:
- fleet
args:
- cleanup
- clusterregistration
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
backoffLimit: 1
{{- end }}

@@ -0,0 +1,44 @@
{{- if and .Values.migrations.gitrepoJobsCleanup .Values.gitops.enabled }}
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: fleet-cleanup-gitrepo-jobs
spec:
schedule: "@daily"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 0
failedJobsHistoryLimit: 1
jobTemplate:
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: gitjob
restartPolicy: Never
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
privileged: false
command:
- fleet
args:
- cleanup
- gitjob
nodeSelector: {{ include "linux-node-selector" . | nindent 12 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 12 }}
backoffLimit: 1
{{- end }}

@@ -0,0 +1,113 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller
rules:
- apiGroups:
- fleet.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'events'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: fleet-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller-bootstrap
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
name: fleet-controller-bootstrap
namespace: {{.Release.Namespace}}
{{- end }}

@@ -0,0 +1,133 @@
{{- if .Values.gitops.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gitjob
rules:
- apiGroups:
- "batch"
resources:
- 'jobs'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'pods'
verbs:
- 'list'
- 'get'
- 'watch'
- apiGroups:
- ""
resources:
- 'secrets'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'configmaps'
verbs:
- '*'
- apiGroups:
- "fleet.cattle.io"
resources:
- "gitrepos"
- "gitrepos/status"
verbs:
- "*"
- apiGroups:
- "fleet.cattle.io"
resources:
- "gitreporestrictions"
verbs:
- list
- get
- watch
- apiGroups:
- "fleet.cattle.io"
resources:
- "bundles"
- "bundledeployments"
- "imagescans"
- "contents"
verbs:
- list
- delete
- get
- watch
- update
- apiGroups:
- ""
resources:
- 'events'
verbs:
- '*'
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- "create"
- apiGroups:
- ""
resources:
- namespaces
verbs:
- "create"
- "delete"
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- escalate
- create
- bind
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitjob-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitjob
rules:
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitjob
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
{{- end }}

@@ -0,0 +1,36 @@
{{- if .Values.metrics.enabled }}
{{- $shards := list (dict "id" "" "nodeSelector" dict) -}}
{{- $uniqueShards := list -}}
{{- if .Values.shards -}}
{{- range .Values.shards -}}
{{- if not (has .id $uniqueShards) -}}
{{- $shards = append $shards . -}}
{{- $uniqueShards = append $uniqueShards .id -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{ range $shard := $shards }}
apiVersion: v1
kind: Service
metadata:
name: "monitoring-fleet-controller{{if $shard.id }}-shard-{{ $shard.id }}{{end}}"
labels:
app: fleet-controller
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: metrics
selector:
app: fleet-controller
{{- if empty $shard.id }}
fleet.cattle.io/shard-default: "true"
{{- else }}
fleet.cattle.io/shard-id: "{{ $shard.id }}"
{{- end }}
---
{{- end }}
{{- end }}

@@ -0,0 +1,51 @@
{{- if .Values.gitops.enabled }}
apiVersion: v1
kind: Service
metadata:
name: gitjob
spec:
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: 8080
selector:
app: "gitjob"
---
{{- if .Values.metrics.enabled }}
{{- $shards := list (dict "id" "" "nodeSelector" dict) -}}
{{- $uniqueShards := list -}}
{{- if .Values.shards -}}
{{- range .Values.shards -}}
{{- if not (has .id $uniqueShards) -}}
{{- $shards = append $shards . -}}
{{- $uniqueShards = append $uniqueShards .id -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{ range $shard := $shards }}
apiVersion: v1
kind: Service
metadata:
name: "monitoring-gitjob{{if $shard.id }}-shard-{{ $shard.id }}{{end}}"
labels:
app: gitjob
spec:
type: ClusterIP
ports:
- port: 8081
targetPort: 8081
protocol: TCP
name: metrics
selector:
app: gitjob
{{- if empty $shard.id }}
fleet.cattle.io/shard-default: "true"
{{- else }}
fleet.cattle.io/shard-id: "{{ $shard.id }}"
{{- end }}
---
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller-bootstrap
{{- end }}

@@ -0,0 +1,6 @@
{{- if .Values.gitops.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitjob
{{- end }}

@@ -0,0 +1,121 @@
image:
repository: rancher/fleet
tag: v0.10.7
imagePullPolicy: IfNotPresent
agentImage:
repository: rancher/fleet-agent
tag: v0.10.7
imagePullPolicy: IfNotPresent
# For cluster registration, the public URL of the Kubernetes API server must be set here.
# Example: https://example.com:6443
apiServerURL: ""
# For cluster registration, the PEM-encoded CA certificate of the Kubernetes API server must be set here.
# If left empty, it is assumed the Kubernetes API server's TLS certificate is signed by a well-known CA.
apiServerCA: ""
# Determines whether the agent should trust CA bundles from the operating system's trust store when connecting to a
# management cluster. True in `system-store` mode, false in `strict` mode.
agentTLSMode: "system-store"
# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"
# Whether to allow clusters to specify their own labels upon registration.
ignoreClusterRegistrationLabels: false
# Counts from GitRepos can fall out of sync with BundleDeployment state.
# Simply retry after a number of seconds, as there is no good way to trigger an event that doesn't cause a loop.
# If not set, the default is 15 seconds.
# clusterEnqueueDelay: 120s
# http[s] proxy server
# proxy: http://<username>:<password>@<url>:<port>
# comma-separated list of domains or IP addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
bootstrap:
enabled: true
# The namespace that will be auto-created, and which the local cluster will be registered in
namespace: fleet-local
# The namespace where the fleet agent for the local cluster will run; if empty,
# this will default to cattle-fleet-system
agentNamespace: ""
# A repo to add at install time that will deploy to the local cluster. This allows
# one to fully bootstrap fleet, its configuration and all its downstream clusters
# in one shot.
repo: ""
secret: ""
branch: master
paths: ""
global:
cattle:
systemDefaultRegistry: ""
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
gitops:
enabled: true
metrics:
enabled: true
debug: false
debugLevel: 0
propagateDebugSettingsToAgents: true
## Optional CPU pprof configuration. Profiles are collected continuously and saved once per period.
## Any valid volume configuration can be provided; the example below uses hostPath.
# cpuPprof:
# period: "60s"
# volumeConfiguration:
# hostPath:
# path: /tmp/pprof
# type: DirectoryOrCreate
migrations:
clusterRegistrationCleanup: true
gitrepoJobsCleanup: true
## Leader election configuration
leaderElection:
leaseDuration: 30s
retryPeriod: 10s
renewDeadline: 25s
## Fleet controller configuration
controller:
reconciler:
# The number of workers allowed for each type of reconciler
workers:
gitrepo: "50"
bundle: "50"
bundledeployment: "50"
# Extra environment variables passed to the fleet pods.
# extraEnv:
# - name: EXPERIMENTAL_OCI_STORAGE
# value: "true"
# shards:
# - id: shard0
# nodeSelector:
# kubernetes.io/hostname: k3d-upstream-server-0
# - id: shard1
# nodeSelector:
# kubernetes.io/hostname: k3d-upstream-server-1
# - id: shard2
# nodeSelector:
# kubernetes.io/hostname: k3d-upstream-server-2
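For illustration, a hedged proxy override matching the format documented above; the host and credentials are placeholders:
```
proxy: http://user:password@proxy.example.com:3128
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
```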

@@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-aks-operator-crd
apiVersion: v2
appVersion: 1.9.5
description: AKS Operator CustomResourceDefinitions
name: rancher-aks-operator-crd
version: 104.5.0+up1.9.5

@@ -0,0 +1,211 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
name: aksclusterconfigs.aks.cattle.io
spec:
group: aks.cattle.io
names:
kind: AKSClusterConfig
plural: aksclusterconfigs
shortNames:
- akscc
singular: aksclusterconfig
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
authBaseUrl:
nullable: true
type: string
authorizedIpRanges:
items:
nullable: true
type: string
nullable: true
type: array
azureCredentialSecret:
nullable: true
type: string
baseUrl:
nullable: true
type: string
clusterName:
nullable: true
type: string
dnsPrefix:
nullable: true
type: string
dnsServiceIp:
nullable: true
type: string
dockerBridgeCidr:
nullable: true
type: string
httpApplicationRouting:
nullable: true
type: boolean
imported:
type: boolean
kubernetesVersion:
nullable: true
type: string
linuxAdminUsername:
nullable: true
type: string
loadBalancerSku:
nullable: true
type: string
logAnalyticsWorkspaceGroup:
nullable: true
type: string
logAnalyticsWorkspaceName:
nullable: true
type: string
managedIdentity:
nullable: true
type: boolean
monitoring:
nullable: true
type: boolean
networkPlugin:
nullable: true
type: string
networkPolicy:
nullable: true
type: string
nodePools:
items:
properties:
availabilityZones:
items:
nullable: true
type: string
nullable: true
type: array
count:
nullable: true
type: integer
enableAutoScaling:
nullable: true
type: boolean
maxCount:
nullable: true
type: integer
maxPods:
nullable: true
type: integer
maxSurge:
nullable: true
type: string
minCount:
nullable: true
type: integer
mode:
nullable: true
type: string
name:
nullable: true
type: string
nodeLabels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
nodeTaints:
items:
nullable: true
type: string
nullable: true
type: array
orchestratorVersion:
nullable: true
type: string
osDiskSizeGB:
nullable: true
type: integer
osDiskType:
nullable: true
type: string
osType:
nullable: true
type: string
vmSize:
nullable: true
type: string
vnetSubnetID:
nullable: true
type: string
type: object
nullable: true
type: array
nodeResourceGroup:
nullable: true
type: string
outboundType:
nullable: true
type: string
podCidr:
nullable: true
type: string
privateCluster:
nullable: true
type: boolean
privateDnsZone:
nullable: true
type: string
resourceGroup:
nullable: true
type: string
resourceLocation:
nullable: true
type: string
serviceCidr:
nullable: true
type: string
sshPublicKey:
nullable: true
type: string
subnet:
nullable: true
type: string
tags:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
userAssignedIdentity:
nullable: true
type: string
virtualNetwork:
nullable: true
type: string
virtualNetworkResourceGroup:
nullable: true
type: string
type: object
status:
properties:
failureMessage:
nullable: true
type: string
phase:
nullable: true
type: string
rbacEnabled:
nullable: true
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-aks-operator-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.26.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: aksclusterconfigs.aks.cattle.io/v1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: rancher-aks-operator
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.9.5
description: A Helm chart for provisioning AKS clusters
home: https://github.com/rancher/aks-operator
name: rancher-aks-operator
sources:
- https://github.com/rancher/aks-operator
version: 104.5.0+up1.9.5

@@ -0,0 +1,4 @@
You have deployed the Rancher AKS operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions AKS clusters
from AKSClusterConfig CRs.

@@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aks-operator
namespace: cattle-system
rules:
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list', 'create', 'watch', 'update']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs']
verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['aks.cattle.io']
resources: ['aksclusterconfigs/status']
verbs: ['update']

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aks-operator
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aks-operator
subjects:
- kind: ServiceAccount
name: aks-operator
namespace: cattle-system

@@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aks-config-operator
namespace: cattle-system
spec:
replicas: 1
selector:
matchLabels:
ke.cattle.io/operator: aks
template:
metadata:
labels:
ke.cattle.io/operator: aks
spec:
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
serviceAccountName: aks-operator
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
securityContext:
fsGroup: 1007
runAsUser: 1007
containers:
- name: aks-operator
image: '{{ template "system_default_registry" $ }}{{ $.Values.aksOperator.image.repository }}:{{ $.Values.aksOperator.image.tag }}'
imagePullPolicy: IfNotPresent
env:
- name: HTTP_PROXY
value: {{ .Values.httpProxy }}
- name: HTTPS_PROXY
value: {{ .Values.httpsProxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- if .Values.additionalTrustedCAs }}
# aks-operator mounts the additional CAs in two places:
volumeMounts:
# This directory is owned by the aks-operator user so c_rehash works here.
- mountPath: /etc/rancher/ssl/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
# This directory is root-owned so c_rehash doesn't work here,
# but the cert is here in case update-ca-certificates is called in the future or by the OS.
- mountPath: /etc/pki/trust/anchors/ca-additional.pem
name: tls-ca-additional-volume
subPath: ca-additional.pem
readOnly: true
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 0400
secretName: tls-ca-additional
{{- end }}
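When `additionalTrustedCAs` is enabled, the comments above describe two mount points fed from a single `tls-ca-additional` Secret. A sketch of that Secret, assuming you supply your own base64-encoded PEM bundle in place of the placeholder:
```
apiVersion: v1
kind: Secret
metadata:
  name: tls-ca-additional
  namespace: cattle-system
type: Opaque
data:
  ca-additional.pem: <base64-encoded PEM CA bundle>
```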

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: cattle-system
name: aks-operator

@@ -0,0 +1,23 @@
global:
cattle:
systemDefaultRegistry: ""
aksOperator:
image:
repository: rancher/aks-operator
tag: v1.9.5
httpProxy: ""
httpsProxy: ""
noProxy: ""
additionalTrustedCAs: false
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/release-name: rancher-cis-benchmark-crd
apiVersion: v1
description: Installs the CRDs for rancher-cis-benchmark.
name: rancher-cis-benchmark-crd
type: application
version: 6.5.1

@@ -0,0 +1,2 @@
# rancher-cis-benchmark-crd
A Rancher chart that installs the CRDs used by rancher-cis-benchmark.

@@ -0,0 +1,149 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscans.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScan
plural: clusterscans
singular: clusterscan
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.lastRunScanProfileName
name: ClusterScanProfile
type: string
- jsonPath: .status.summary.total
name: Total
type: string
- jsonPath: .status.summary.pass
name: Pass
type: string
- jsonPath: .status.summary.fail
name: Fail
type: string
- jsonPath: .status.summary.skip
name: Skip
type: string
- jsonPath: .status.summary.warn
name: Warn
type: string
- jsonPath: .status.summary.notApplicable
name: Not Applicable
type: string
- jsonPath: .status.lastRunTimestamp
name: LastRunTimestamp
type: string
- jsonPath: .spec.scheduledScanConfig.cronSchedule
name: CronSchedule
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
scanProfileName:
nullable: true
type: string
scheduledScanConfig:
nullable: true
properties:
cronSchedule:
nullable: true
type: string
retentionCount:
type: integer
scanAlertRule:
nullable: true
properties:
alertOnComplete:
type: boolean
alertOnFailure:
type: boolean
type: object
type: object
scoreWarning:
enum:
- pass
- fail
nullable: true
type: string
type: object
status:
properties:
NextScanAt:
nullable: true
type: string
ScanAlertingRuleName:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
display:
nullable: true
properties:
error:
type: boolean
message:
nullable: true
type: string
state:
nullable: true
type: string
transitioning:
type: boolean
type: object
lastRunScanProfileName:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
observedGeneration:
type: integer
summary:
nullable: true
properties:
fail:
type: integer
notApplicable:
type: integer
pass:
type: integer
skip:
type: integer
total:
type: integer
warn:
type: integer
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,55 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanbenchmarks.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanBenchmark
plural: clusterscanbenchmarks
singular: clusterscanbenchmark
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.clusterProvider
name: ClusterProvider
type: string
- jsonPath: .spec.minKubernetesVersion
name: MinKubernetesVersion
type: string
- jsonPath: .spec.maxKubernetesVersion
name: MaxKubernetesVersion
type: string
- jsonPath: .spec.customBenchmarkConfigMapName
name: customBenchmarkConfigMapName
type: string
- jsonPath: .spec.customBenchmarkConfigMapNamespace
name: customBenchmarkConfigMapNamespace
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
clusterProvider:
nullable: true
type: string
customBenchmarkConfigMapName:
nullable: true
type: string
customBenchmarkConfigMapNamespace:
nullable: true
type: string
maxKubernetesVersion:
nullable: true
type: string
minKubernetesVersion:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanprofiles.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanProfile
plural: clusterscanprofiles
singular: clusterscanprofile
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
skipTests:
items:
nullable: true
type: string
nullable: true
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,40 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterscanreports.cis.cattle.io
spec:
group: cis.cattle.io
names:
kind: ClusterScanReport
plural: clusterscanreports
singular: clusterscanreport
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.lastRunTimestamp
name: LastRunTimestamp
type: string
- jsonPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
reportJSON:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: rancher-cis-benchmark-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: CIS Benchmark
catalog.cattle.io/kube-version: '>= 1.27.0-0 < 1.31.0-0'
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1
catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0'
catalog.cattle.io/release-name: rancher-cis-benchmark
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-cis-benchmark
apiVersion: v1
appVersion: v6.5.1
description: The cis-operator enables running CIS benchmark security scans on a Kubernetes
cluster
icon: https://charts.rancher.io/assets/logos/cis-kube-bench.svg
keywords:
- security
name: rancher-cis-benchmark
version: 6.5.1

@@ -0,0 +1,9 @@
# Rancher CIS Benchmark Chart
The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generating compliance reports that can be downloaded.
# Installation
```
helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system
```

@@ -0,0 +1,31 @@
# Rancher CIS Benchmarks
This chart enables security scanning of the cluster using [CIS (Center for Internet Security) benchmarks](https://www.cisecurity.org/benchmark/kubernetes/).
For more information on how to use the feature, refer to our [docs](https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/cis-scan-guides).
This chart installs the following components:
- [cis-operator](https://github.com/rancher/cis-operator) - The cis-operator handles launching the [kube-bench](https://github.com/aquasecurity/kube-bench) tool that runs a suite of CIS tests on the nodes of your Kubernetes cluster. After scans finish, the cis-operator generates a compliance report that can be downloaded.
- Scans - A scan is a CRD (`ClusterScan`) that defines when to trigger CIS scans on the cluster, based on the defined profile. A report is created after the scan completes (see the example `ClusterScan` after this list).
- Profiles - A profile is a CRD (`ClusterScanProfile`) that defines the configuration for the CIS scan, that is, the benchmark version to use and any specific tests to skip in that benchmark. This chart installs a few default `ClusterScanProfile` custom resources with no skipped tests, which can immediately be used to launch CIS scans (see the example `ClusterScanProfile` at the end of this page).
- Benchmark Versions - A benchmark version is a CRD (`ClusterScanBenchmark`) that defines the CIS benchmark version to run using kube-bench, as well as the valid configuration parameters for that benchmark. This chart installs a few default `ClusterScanBenchmark` custom resources.
- Alerting Resources - Rancher's CIS Benchmark application lets you run a cluster scan on a schedule and send alerts when scans finish.
- If you want alerts to be delivered when a cluster scan completes, you need to ensure that [Rancher's Monitoring and Alerting](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/) application is pre-installed and the [Receivers and Routes](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/#alertmanager-config) are configured to send out alerts.
- Additionally, you need to set `alerts.enabled: true` in the values YAML while installing or upgrading this chart.
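As referenced in the Scans item above, a minimal sketch of a scheduled `ClusterScan`; the field names come from the `clusterscans.cis.cattle.io` CRD installed by the companion CRD chart, the profile name is one of the defaults listed in this chart's `default-clusterscanprofiles` ConfigMap, and the resource name and schedule are placeholders:
```
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: daily-scan
spec:
  scanProfileName: rke2-cis-1.8-profile-permissive
  scheduledScanConfig:
    cronSchedule: "0 0 * * *"
    retentionCount: 3
```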
## CIS Kubernetes Benchmark support
| Source | Kubernetes distribution | scan profile | Kubernetes versions |
|--------|-------------------------|--------------------------------------------------------------------------------------------------------------------|---------------------|
| CIS | any | [cis-1.8](https://github.com/aquasecurity/kube-bench/tree/main/cfg/cis-1.8) | v1.26+ |
| CIS | rke | [rke-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/rke-cis-1.8-permissive) | rke1-v1.26+ |
| CIS | rke | [rke-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/rke-cis-1.8-hardened) | rke1-v1.26+ |
| CIS | rke2 | [rke2-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/rke2-cis-1.8-permissive) | rke2-v1.26+ |
| CIS | rke2 | [rke2-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/rke2-cis-1.8-hardened) | rke2-v1.26+ |
| CIS | k3s | [k3s-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/k3s-cis-1.8-permissive) | k3s-v1.26+ |
| CIS | k3s | [k3s-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.4/package/cfg/k3s-cis-1.8-hardened) | k3s-v1.26+ |
| CIS | eks | [eks-1.2.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/eks-1.2.0) | eks |
| CIS | aks | [aks-1.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/aks-1.0) | aks |
| CIS | gke | [gke-1.2.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/gke-1.2.0) | gke-1.20 |
| CIS | gke | [gke-1.6.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/gke-1.6.0) | gke-1.29+ |
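And as referenced in the Profiles item above, a hedged `ClusterScanProfile` sketch; `benchmarkVersion` and `skipTests` come from the `clusterscanprofiles.cis.cattle.io` CRD, `cis-1.8` is a benchmark shipped with this chart, and the profile name and skipped test ID are placeholders:
```
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
  name: cis-1.8-custom
spec:
  benchmarkVersion: cis-1.8
  skipTests:
    - "1.1.1"
```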

@@ -0,0 +1,27 @@
{{/* Ensure namespace is set the same everywhere */}}
{{- define "cis.namespace" -}}
{{- .Release.Namespace | default "cis-operator-system" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes; add the Linux tolerations
below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,14 @@
{{- if .Values.alerts.enabled -}}
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: rancher-cis-pod-monitor
namespace: {{ template "cis.namespace" . }}
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
podMetricsEndpoints:
- port: cismetrics
{{- end }}

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: aks-1.0
spec:
clusterProvider: aks
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.8
spec:
clusterProvider: ""
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: eks-1.2.0
spec:
clusterProvider: eks
minKubernetesVersion: "1.15.0"

@@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: gke-1.2.0
spec:
clusterProvider: gke
minKubernetesVersion: "1.15.0"
maxKubernetesVersion: "1.28.x"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: gke-1.6.0
spec:
clusterProvider: gke
minKubernetesVersion: "1.29.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.8-hardened
spec:
clusterProvider: k3s
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: k3s-cis-1.8-permissive
spec:
clusterProvider: k3s
minKubernetesVersion: "1.26.0"

@@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.8-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.26.0"

View File

@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.8-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.26.0"

View File

@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.8-hardened
spec:
clusterProvider: rke2
minKubernetesVersion: "1.26.0"

View File

@ -0,0 +1,8 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke2-cis-1.8-permissive
spec:
clusterProvider: rke2
minKubernetesVersion: "1.26.0"

View File

@ -0,0 +1,49 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-admin
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["create", "update", "delete", "patch","get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cis-view
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["get", "watch", "list"]
- apiGroups:
- catalog.cattle.io
resources: ["apps"]
resourceNames: ["rancher-cis-benchmark"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- configmaps
verbs: ["get", "watch", "list"]

View File

@ -0,0 +1,18 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: default-clusterscanprofiles
namespace: {{ template "cis.namespace" . }}
data:
# Default ClusterScanProfiles per cluster provider type
rke: |-
<1.21.0: rke-profile-permissive-1.20
>=1.21.0: rke-profile-permissive-1.8
rke2: |-
<1.21.0: rke2-cis-1.20-profile-permissive
>=1.21.0: rke2-cis-1.8-profile-permissive
eks: "eks-profile"
gke: "gke-profile-1.6.0"
aks: "aks-profile"
k3s: "k3s-cis-1.8-profile-permissive"
default: "cis-1.8-profile"

View File

@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: cis-operator
namespace: {{ template "cis.namespace" . }}
labels:
cis.cattle.io/operator: cis-operator
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
template:
metadata:
labels:
cis.cattle.io/operator: cis-operator
spec:
serviceAccountName: cis-operator-serviceaccount
containers:
- name: cis-operator
image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}'
imagePullPolicy: IfNotPresent
ports:
- name: cismetrics
containerPort: {{ .Values.alerts.metricsPort }}
env:
- name: SECURITY_SCAN_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }}
- name: SECURITY_SCAN_IMAGE_TAG
value: {{ .Values.image.securityScan.tag }}
- name: SONOBUOY_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.sonobuoy.repository }}
- name: SONOBUOY_IMAGE_TAG
value: {{ .Values.image.sonobuoy.tag }}
- name: CIS_ALERTS_METRICS_PORT
value: '{{ .Values.alerts.metricsPort }}'
- name: CIS_ALERTS_SEVERITY
value: {{ .Values.alerts.severity }}
- name: CIS_ALERTS_ENABLED
value: {{ .Values.alerts.enabled | default "false" | quote }}
- name: CLUSTER_NAME
value: '{{ .Values.global.cattle.clusterName }}'
- name: CIS_OPERATOR_DEBUG
value: '{{ .Values.image.cisoperator.debug }}'
{{- if .Values.securityScanJob.overrideTolerations }}
- name: SECURITY_SCAN_JOB_TOLERATIONS
value: '{{ .Values.securityScanJob.tolerations | toJson }}'
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
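The scheduling blocks merge the Linux defaults from _helpers.tpl with user-supplied values. A hedged example of pinning the operator to dedicated nodes (the label and taint names are hypothetical):

nodeSelector:
  node-role.example.com/security: "true"
tolerations:
- key: "node-role.example.com/security"
  operator: "Exists"
  effect: "NoSchedule"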

View File

@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ template "cis.namespace" . }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

View File

@ -0,0 +1,29 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: cis-operator-serviceaccount
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ template "cis.namespace" . }}]
backoffLimit: 1
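With the default namespace helper, the hook above amounts to the following one-off command (shown for reference; the job runs it automatically on install and upgrade):

# kubectl -n cis-operator-system patch serviceaccount default \
#   -p '{"automountServiceAccountToken": false}'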

View File

@ -0,0 +1,209 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-clusterrole
rules:
- apiGroups:
- "cis.cattle.io"
resources:
- "*"
verbs:
- "*"
- apiGroups:
- ""
resources:
- "pods"
- "services"
- "configmaps"
- "nodes"
- "serviceaccounts"
verbs:
- "get"
- "list"
- "create"
- "update"
- "watch"
- "patch"
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- "rolebindings"
- "clusterrolebindings"
- "clusterroles"
verbs:
- "get"
- "list"
- apiGroups:
- "batch"
resources:
- "jobs"
verbs:
- "list"
- "create"
- "patch"
- "update"
- "watch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-scan-ns
rules:
- apiGroups:
- ""
resources:
- "namespaces"
- "nodes"
- "pods"
- "serviceaccounts"
- "services"
- "replicationcontrollers"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- "rolebindings"
- "clusterrolebindings"
- "clusterroles"
verbs:
- "get"
- "list"
- apiGroups:
- "batch"
resources:
- "jobs"
- "cronjobs"
verbs:
- "list"
- apiGroups:
- "apps"
resources:
- "daemonsets"
- "deployments"
- "replicasets"
- "statefulsets"
verbs:
- "list"
- apiGroups:
- "autoscaling"
resources:
- "horizontalpodautoscalers"
verbs:
- "list"
- apiGroups:
- "networking.k8s.io"
resources:
- "networkpolicies"
verbs:
- "get"
- "list"
- "watch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cis-operator-role
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
namespace: {{ template "cis.namespace" . }}
rules:
- apiGroups:
- ""
resources:
- "services"
verbs:
- "watch"
- "list"
- "get"
- "patch"
- apiGroups:
- "batch"
resources:
- "jobs"
verbs:
- "watch"
- "list"
- "get"
- "delete"
- apiGroups:
- ""
resources:
- "configmaps"
- "pods"
- "secrets"
verbs:
- "*"
- apiGroups:
- "apps"
resources:
- "daemonsets"
verbs:
- "*"
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cis-operator-clusterrole
subjects:
- kind: ServiceAccount
name: cis-operator-serviceaccount
namespace: {{ template "cis.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cis-scan-ns
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cis-scan-ns
subjects:
- kind: ServiceAccount
name: cis-serviceaccount
namespace: {{ template "cis.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-rolebinding
namespace: {{ template "cis.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cis-operator-role
subjects:
- kind: ServiceAccount
name: cis-serviceaccount
namespace: {{ template "cis.namespace" . }}
- kind: ServiceAccount
name: cis-operator-serviceaccount
namespace: {{ template "cis.namespace" . }}

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: cis-1.8-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: cis-1.8
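Profiles are the objects that scans reference. A minimal sketch of a scan using this profile; the scanProfileName field name follows the cis-operator API and is assumed here:

apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: example-cis-scan
spec:
  scanProfileName: cis-1.8-profile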

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: k3s-cis-1.8-profile-hardened
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: k3s-cis-1.8-hardened

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: k3s-cis-1.8-profile-permissive
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: k3s-cis-1.8-permissive

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-hardened-1.8
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.8-hardened

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-permissive-1.8
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.8-permissive

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.8-profile-hardened
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.8-hardened

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke2-cis-1.8-profile-permissive
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke2-cis-1.8-permissive

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: aks-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: aks-1.0

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: eks-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: eks-1.2.0

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: gke-profile-1.6.0
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: gke-1.6.0

View File

@ -0,0 +1,9 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: gke-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: gke-1.2.0

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
name: cis-operator-serviceaccount
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-serviceaccount

View File

@ -0,0 +1,17 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "cis.cattle.io/v1/ClusterScan" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanBenchmark" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanProfile" false -}}
# {{- set $found "cis.cattle.io/v1/ClusterScanReport" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,53 @@
# Default values for rancher-cis-benchmark.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
cisoperator:
repository: rancher/cis-operator
tag: v1.2.3
securityScan:
repository: rancher/security-scan
tag: v0.4.1
sonobuoy:
repository: rancher/mirrored-sonobuoy-sonobuoy
tag: v0.57.2
resources: {}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chance that the chart runs on
# environments with limited resources, such as Minikube. If you do want to specify
# resources, uncomment the following lines, adjust them as necessary, and remove the
# curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
securityScanJob:
overrideTolerations: false
tolerations: []
affinity: {}
global:
cattle:
systemDefaultRegistry: ""
clusterName: ""
kubectl:
repository: rancher/kubectl
tag: v1.29.11
alerts:
enabled: false
severity: warning
metricsPort: 8080
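A hedged install sketch against these defaults (the chart reference is a placeholder; in Rancher the chart is normally installed through the Apps catalog):

# helm install rancher-cis-benchmark <chart-ref> \
#   --namespace cis-operator-system --create-namespace \
#   --set alerts.enabled=true \
#   --set global.cattle.systemDefaultRegistry=registry.example.com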

View File

@ -0,0 +1,12 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: rancher-eks-operator-crd
apiVersion: v2
appVersion: 1.9.5
description: EKS Operator CustomResourceDefinitions
name: rancher-eks-operator-crd
version: 104.5.0+up1.9.5

Some files were not shown because too many files have changed in this diff.