[dev-v2.9] Forward ports fleet 102.2.2+up0.8.2 from release-v2 (#3633)

Lucas Machado 2024-03-14 14:55:53 -03:00 committed by GitHub
parent 474ef4882f
commit bc598718c4
41 changed files with 12160 additions and 0 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.8.2
description: Fleet Manager Agent - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
version: 102.2.2+up0.8.2

@@ -0,0 +1,8 @@
## Fleet Agent Helm Chart
Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster.
Standalone Fleet users use this chart for agent-initiated registration; for more details, see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated).
Fleet in Rancher does not use this chart, but creates the agent deployments programmatically.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
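When installing this chart directly for agent-initiated registration, the registration settings are supplied as values at install time. A minimal sketch of such an override file, using only keys from this chart's values.yaml and placeholder values throughout, might look like:
```
# hypothetical values-agent.yaml for agent-initiated registration
apiServerURL: "https://fleet-manager.example.com:6443"  # public URL of the Fleet Manager's Kubernetes API server
apiServerCA: ""                                          # PEM-encoded CA; leave empty if signed by a well-known CA
token: "<cluster-registration-token>"                    # value obtained from the cluster registration secret
clusterNamespace: "<cluster-namespace>"                  # namespace of the cluster we are registering with
labels:
  env: dev                                               # labels are applied at registration time only
```
The chart's validation template additionally requires the release to be named fleet-agent and to be installed into the cattle-fleet-system namespace.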

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,12 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fleet-agent
data:
config: |-
{
{{ if .Values.labels }}
"labels":{{toJson .Values.labels}},
{{ end }}
"clientID":"{{.Values.clientID}}"
}

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-agent
spec:
selector:
matchLabels:
app: fleet-agent
template:
metadata:
labels:
app: fleet-agent
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: '{{ template "system_default_registry" . }}{{.Values.image.repository}}:{{.Values.image.tag}}'
name: fleet-agent
command:
- fleetagent
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
serviceAccountName: fleet-agent
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.fleetAgent.nodeSelector }}
{{ toYaml .Values.fleetAgent.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.fleetAgent.tolerations }}
{{ toYaml .Values.fleetAgent.tolerations | indent 8 }}
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,15 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ .Values.internal.systemNamespace }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -0,0 +1,28 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-fleet-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: fleet-agent
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ .Values.internal.systemNamespace }}]
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.kubectl.nodeSelector }}
{{ toYaml .Values.kubectl.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.kubectl.tolerations }}
{{ toYaml .Values.kubectl.tolerations | indent 8 }}
{{- end }}
backoffLimit: 1

@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-agent-system-fleet-agent-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-agent-system-fleet-agent-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-agent-system-fleet-agent-role
subjects:
- kind: ServiceAccount
name: fleet-agent
namespace: {{.Release.Namespace}}

@@ -0,0 +1,10 @@
apiVersion: v1
data:
systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}"
clusterNamespace: "{{b64enc .Values.clusterNamespace}}"
token: "{{b64enc .Values.token}}"
apiServerURL: "{{b64enc .Values.apiServerURL}}"
apiServerCA: "{{b64enc .Values.apiServerCA}}"
kind: Secret
metadata:
name: fleet-agent-bootstrap

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-agent

@@ -0,0 +1,11 @@
{{if ne .Release.Namespace .Values.internal.systemNamespace }}
{{ fail (printf "This chart must be installed in the namespace %s" .Values.internal.systemNamespace) }}
{{end}}
{{if ne .Release.Name .Values.internal.managedReleaseName }}
{{ fail (printf "This chart must be installed with the release name %s" .Values.internal.managedReleaseName) }}
{{end}}
{{if not .Values.apiServerURL }}
{{ fail "apiServerURL must be set, and most likely apiServerCA as well" }}
{{end}}

@@ -0,0 +1,63 @@
image:
os: "windows,linux"
repository: rancher/fleet-agent
tag: v0.8.2
# The public URL of the Kubernetes API server running the Fleet Manager must be set here
# Example: https://example.com:6443
apiServerURL: ""
# The PEM-encoded CA certificate of the Kubernetes API server running the Fleet Manager.
# If left empty, the Kubernetes API TLS certificate is assumed to be signed by a well-known CA.
apiServerCA: ""
# The cluster registration value
token: ""
# Labels to add to the cluster upon registration only. They are not added after the fact.
#labels:
# foo: bar
# The client ID of the cluster to associate with
clientID: ""
# The namespace of the cluster we are registering with
clusterNamespace: ""
# The namespace containing the clusters' registration secrets
systemRegistrationNamespace: cattle-fleet-clusters-system
# Please do not change the below setting unless you really know what you are doing
internal:
systemNamespace: cattle-fleet-system
managedReleaseName: fleet-agent
# The nodeSelector and tolerations for the agent deployment
fleetAgent:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
kubectl:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: "Equal"
value: "true"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.21.5
debug: false
debugLevel: 0

@@ -0,0 +1,13 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.8.2
description: Fleet Manager CustomResourceDefinitions
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
version: 102.2.2+up0.8.2

@@ -0,0 +1,5 @@
# Fleet CRD Helm Chart
The Fleet Manager CustomResourceDefinitions chart is a prerequisite for the Fleet Helm chart.
The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1 @@
# This file is intentionally empty

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.8.2
dependencies:
- condition: gitops.enabled
name: gitjob
repository: file://./charts/gitjob
description: Fleet Manager - GitOps at Scale
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
version: 102.2.2+up0.8.2

@@ -0,0 +1,30 @@
# Fleet Helm Chart
Fleet is GitOps at scale, designed to manage deployments across many clusters.
## What is Fleet?
* Cluster engine: Fleet is a container management and deployment engine designed to offer users more control over the local cluster and constant monitoring through GitOps. Fleet focuses not only on the ability to scale, but also on giving users a high degree of control and visibility into exactly what is installed on the cluster.
* Deployment management: Fleet can manage deployments from Git repositories containing raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three (see the GitRepo sketch below). Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy everything to the cluster. As a result, users get a high degree of control, consistency, and auditability of their clusters.
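As a sketch of that flow, once the CRDs from the fleet-crd chart are installed, a Git repository is registered with a GitRepo resource along these lines; the name, repository URL, and paths below are placeholders:
```
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: sample-repo            # hypothetical name
  namespace: fleet-local       # the bootstrap namespace created by the fleet chart
spec:
  repo: https://github.com/example/fleet-examples  # placeholder repository
  branch: master
  paths:
    - simple                   # directories containing raw YAML, Helm charts, or Kustomize
  targets:
    - clusterSelector: {}      # an empty selector targets all registered clusters
```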
## Introduction
This chart deploys Fleet on a Kubernetes cluster. It also deploys some of its dependencies as subcharts.
The documentation is centralized in the [doc website](https://fleet.rancher.io/).
## Prerequisites
Install Helm if you don't have it; Helm 3 is a standalone CLI.
## Install Fleet
Install the Fleet Helm charts (there are two because the CRDs are packaged separately, for maximum flexibility):
```
$ helm repo add fleet https://rancher.github.io/fleet-helm-charts/
$ helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
$ helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```
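Beyond the defaults, managing downstream clusters requires apiServerURL, and usually apiServerCA, so that agents can reach the Fleet Manager. A minimal sketch of a custom values file (placeholder URL; all keys mirror this chart's values.yaml) that could be passed to the `fleet` install above with `-f`:
```
# hypothetical fleet-values.yaml
apiServerURL: "https://fleet-manager.example.com:6443"  # public URL downstream agents will use
apiServerCA: ""                                         # PEM-encoded CA; leave empty if signed by a well-known CA
agentCheckinInterval: "15m"                             # how often agents report a heartbeat
bootstrap:
  enabled: true                                         # also register the local cluster in fleet-local
```
Anything not overridden keeps its default from values.yaml.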

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,5 @@
apiVersion: v2
appVersion: 0.8.2
description: Controller that runs jobs based on git events
name: gitjob
version: 0.8.2

@@ -0,0 +1,7 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gitjob
rules:
- apiGroups:
- "batch"
resources:
- 'jobs'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'pods'
verbs:
- 'list'
- 'get'
- 'watch'
- apiGroups:
- ""
resources:
- 'secrets'
verbs:
- '*'
- apiGroups:
- ""
resources:
- 'configmaps'
verbs:
- '*'
- apiGroups:
- "gitjob.cattle.io"
resources:
- "gitjobs"
- "gitjobs/status"
verbs:
- "*"

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitjob-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob
namespace: {{ .Release.Namespace }}

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitjob
spec:
selector:
matchLabels:
app: "gitjob"
template:
metadata:
labels:
app: "gitjob"
spec:
serviceAccountName: gitjob
containers:
- image: "{{ template "system_default_registry" . }}{{ .Values.gitjob.repository }}:{{ .Values.gitjob.tag }}"
name: gitjob
args:
{{- if .Values.debug }}
- --debug
{{- end }}
- --tekton-image
- "{{ template "system_default_registry" . }}{{ .Values.tekton.repository }}:{{ .Values.tekton.tag }}"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}

@@ -0,0 +1,23 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitjob
rules:
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitjob
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitjob
subjects:
- kind: ServiceAccount
name: gitjob

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: gitjob
spec:
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: 8080
selector:
app: "gitjob"

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitjob

@@ -0,0 +1,31 @@
gitjob:
repository: rancher/gitjob
tag: v0.8.2
tekton:
repository: rancher/tekton-utils
tag: v0.1.37
global:
cattle:
systemDefaultRegistry: ""
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
# PriorityClassName assigned to deployment.
priorityClassName: ""
debug: false

@@ -0,0 +1,22 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}

@@ -0,0 +1,25 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fleet-controller
data:
config: |
{
"systemDefaultRegistry": "{{ template "system_default_registry" . }}",
"agentImage": "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}",
"agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}",
"apiServerURL": "{{.Values.apiServerURL}}",
"apiServerCA": "{{b64enc .Values.apiServerCA}}",
"agentCheckinInterval": "{{.Values.agentCheckinInterval}}",
"ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}},
"bootstrap": {
"paths": "{{.Values.bootstrap.paths}}",
"repo": "{{.Values.bootstrap.repo}}",
"secret": "{{.Values.bootstrap.secret}}",
"branch": "{{.Values.bootstrap.branch}}",
"namespace": "{{.Values.bootstrap.namespace}}",
"agentNamespace": "{{.Values.bootstrap.agentNamespace}}",
},
"webhookReceiverURL": "{{.Values.webhookReceiverURL}}",
"githubURLPrefix": "{{.Values.githubURLPrefix}}"
}

@@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fleet-controller
spec:
selector:
matchLabels:
app: fleet-controller
template:
metadata:
labels:
app: fleet-controller
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS
value: {{ quote .Values.propagateDebugSettingsToAgents }}
{{- if .Values.clusterEnqueueDelay }}
- name: FLEET_CLUSTER_ENQUEUE_DELAY
value: {{ .Values.clusterEnqueueDelay }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_DIR
value: /tmp/pprof/
{{- end }}
{{- if .Values.cpuPprof }}
- name: FLEET_CPU_PPROF_PERIOD
value: {{ quote .Values.cpuPprof.period }}
{{- end }}
{{- if .Values.debug }}
- name: CATTLE_DEV_MODE
value: "true"
{{- end }}
image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
name: fleet-controller
imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
command:
- fleetcontroller
{{- if not .Values.gitops.enabled }}
- --disable-gitops
{{- end }}
{{- if not .Values.bootstrap.enabled }}
- --disable-bootstrap
{{- end }}
{{- if .Values.debug }}
- --debug
- --debug-level
- {{ quote .Values.debugLevel }}
{{- else }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
{{- if .Values.cpuPprof }}
- mountPath: /tmp/pprof
name: pprof
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- if .Values.cpuPprof }}
- name: pprof {{ toYaml .Values.cpuPprof.volumeConfiguration | nindent 10 }}
{{- end }}
serviceAccountName: fleet-controller
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{.Values.priorityClassName}}"
{{- end }}
{{- if not .Values.debug }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
{{- end }}

@@ -0,0 +1,29 @@
{{- if .Values.migrations.clusterRegistrationCleanup }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: fleet-cleanup-clusterregistrations
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
metadata:
labels:
app: fleet-job
spec:
serviceAccountName: fleet-controller
restartPolicy: Never
containers:
- name: cleanup
image: "{{ template "system_default_registry" . }}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command:
- fleet
args:
- cleanup
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
backoffLimit: 1
{{- end }}

@@ -0,0 +1,114 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller
rules:
- apiGroups:
- gitjob.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- fleet.cattle.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: fleet-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fleet-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: fleet-controller
subjects:
- kind: ServiceAccount
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fleet-controller-bootstrap
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fleet-controller-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fleet-controller-bootstrap
subjects:
- kind: ServiceAccount
name: fleet-controller-bootstrap
namespace: {{.Release.Namespace}}
{{- end }}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller
{{- if .Values.bootstrap.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fleet-controller-bootstrap
{{- end }}

@@ -0,0 +1,83 @@
image:
repository: rancher/fleet
tag: v0.8.2
imagePullPolicy: IfNotPresent
agentImage:
repository: rancher/fleet-agent
tag: v0.8.2
imagePullPolicy: IfNotPresent
# For cluster registration the public URL of the Kubernetes API server must be set here
# Example: https://example.com:6443
apiServerURL: ""
# For cluster registration, the PEM-encoded CA of the Kubernetes API server must be set here.
# If left empty, the Kubernetes API TLS certificate is assumed to be signed by a well-known CA.
apiServerCA: ""
# A duration string for how often agents should report a heartbeat
agentCheckinInterval: "15m"
# Whether to allow clusters to specify their own labels upon registration.
ignoreClusterRegistrationLabels: false
# Counts from GitRepos can get out of sync with BundleDeployment state.
# Retry after a number of seconds, as there is no good way to trigger an event that does not cause a loop.
# If not set, the default is 15 seconds.
# clusterEnqueueDelay: 120s
# http[s] proxy server
# proxy: http://<username>:<password>@<host>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
bootstrap:
enabled: true
# The namespace that will be auto-created and in which the local cluster will be registered
namespace: fleet-local
# The namespace where the fleet agent for the local cluster will run; if empty,
# this defaults to cattle-fleet-system
agentNamespace: ""
# A repo to add at install time that will deploy to the local cluster. This allows
# one to fully bootstrap fleet, its configuration and all its downstream clusters
# in one shot.
repo: ""
secret: ""
branch: master
paths: ""
global:
cattle:
systemDefaultRegistry: ""
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
## PriorityClassName assigned to deployment.
priorityClassName: ""
gitops:
enabled: true
debug: false
debugLevel: 0
propagateDebugSettingsToAgents: true
## Optional CPU pprof configuration. Profiles are collected continuously and saved every period.
## Any valid volume configuration can be provided; the example below uses hostPath.
#cpuPprof:
# period: "60s"
# volumeConfiguration:
# hostPath:
# path: /tmp/pprof
# type: DirectoryOrCreate
migrations:
clusterRegistrationCleanup: true

@@ -633,6 +633,32 @@ entries:
urls:
- assets/fleet/fleet-103.1.0+up0.9.0.tgz
version: 103.1.0+up0.9.0
- annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/experimental: "true"
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet
apiVersion: v2
appVersion: 0.8.2
created: "2024-03-14T11:43:10.206688-03:00"
dependencies:
- condition: gitops.enabled
name: gitjob
repository: file://./charts/gitjob
description: Fleet Manager - GitOps at Scale
digest: 38761c7565a4feebfe30e91cb72cb105408c0488906498036de6dfef088be82e
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet
urls:
- assets/fleet/fleet-102.2.2+up0.8.2.tgz
version: 102.2.2+up0.8.2
- annotations:
catalog.cattle.io/auto-install: fleet-crd=match
catalog.cattle.io/certified: rancher
@@ -1273,6 +1299,25 @@ entries:
urls:
- assets/fleet-agent/fleet-agent-103.1.0+up0.9.0.tgz
version: 103.1.0+up0.9.0
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: fleet-agent
apiVersion: v2
appVersion: 0.8.2
created: "2024-03-14T11:43:20.897323-03:00"
description: Fleet Manager Agent - GitOps at Scale
digest: 0648acb64551200b2b7ab30856982b6d3ed513c5bdb6b714102ccf8d65b14de6
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-agent
urls:
- assets/fleet-agent/fleet-agent-102.2.2+up0.8.2.tgz
version: 102.2.2+up0.8.2
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
@@ -1758,6 +1803,23 @@ entries:
urls:
- assets/fleet-crd/fleet-crd-103.1.0+up0.9.0.tgz
version: 103.1.0+up0.9.0
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-fleet-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/release-name: fleet-crd
apiVersion: v2
appVersion: 0.8.2
created: "2024-03-14T11:43:15.910558-03:00"
description: Fleet Manager CustomResourceDefinitions
digest: fe160494b5ce50211d8b0dea6b07a1d24908de6f63a123b2637b09bd85ca155e
icon: https://charts.rancher.io/assets/logos/fleet.svg
name: fleet-crd
urls:
- assets/fleet-crd/fleet-crd-102.2.2+up0.8.2.tgz
version: 102.2.2+up0.8.2
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"

@@ -1,9 +1,12 @@
fleet:
- 104.0.0+up0.10.0-rc.4
- 102.2.2+up0.8.2
fleet-agent:
- 104.0.0+up0.10.0-rc.4
- 102.2.2+up0.8.2
fleet-crd:
- 104.0.0+up0.10.0-rc.4
- 102.2.2+up0.8.2
longhorn:
- 102.3.1+up1.5.3
- 102.3.2+up1.5.4