Merge pull request #3207 from rancher/feat-2-8-batch-6

[release-v2.8] Feature Charts batch release for harvester, rancher-csp-adapter and rancher-logging
pull/3215/head
Sakala Venkata Krishna Rohit 2023-10-31 13:48:08 -07:00 committed by GitHub
commit e264f95bb7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
81 changed files with 45194 additions and 41 deletions

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# macOS Finder metadata
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,22 @@
# Chart metadata for the harvester-cloud-provider chart.
annotations:
  # Rancher catalog annotations: certification, display name, install
  # namespace, and the Kubernetes / Rancher version ranges this release targets.
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Harvester Cloud Provider
  catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
  catalog.cattle.io/namespace: kube-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux
  catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
  catalog.cattle.io/release-name: harvester-cloud-provider
  catalog.cattle.io/ui-component: harvester-cloud-provider
  # Version of the upstream chart this Rancher chart is based on.
  catalog.cattle.io/upstream-version: 0.1.14
apiVersion: v2
appVersion: v0.1.5
description: A Helm chart for Harvester Cloud Provider
keywords:
  - infrastructure
  - harvester
maintainers:
  - name: harvester
name: harvester-cloud-provider
type: application
# Rancher versioning scheme: <rancher-chart-version>+up<upstream-chart-version>.
version: 103.0.0+up0.1.14

View File

@ -0,0 +1,3 @@
# CI values used when test-installing the chart in a kind cluster.
replicasCount: 1
# An existing but invalid kubeconfig, used only for Helm installation testing in kind.
cloudConfigPath: "/etc/kubernetes/admin.conf"

View File

@ -0,0 +1,11 @@
# Rancher UI metadata: catalog categories, install namespace, and the
# questions shown on the chart-install form.
categories:
  - infrastructure
  - harvester
namespace: kube-system
questions:
  - variable: cloudConfigPath
    label: Cloud config file path
    description: "Specify the path of the cloud config."
    group: "Default"
    type: string
    default: "/etc/kubernetes/cloud-config"

View File

@ -0,0 +1,69 @@
{{/* vim: set filetype=mustache: */}}

{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set; truncated to 63 chars (DNS label limit).
*/}}
{{- define "harvester-cloud-provider.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "harvester-cloud-provider.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" (SemVer build metadata) is not a valid label character, so it is replaced by "_".
*/}}
{{- define "harvester-cloud-provider.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to all objects rendered by this chart.
*/}}
{{- define "harvester-cloud-provider.labels" -}}
helm.sh/chart: {{ include "harvester-cloud-provider.chart" . }}
{{ include "harvester-cloud-provider.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}

{{/*
Selector labels: the stable subset used for matchLabels.
*/}}
{{- define "harvester-cloud-provider.selectorLabels" -}}
app.kubernetes.io/name: {{ include "harvester-cloud-provider.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use.
*/}}
{{- define "harvester-cloud-provider.serviceAccountName" -}}
{{- default (include "harvester-cloud-provider.fullname" .) .Values.serviceAccount.name }}
{{- end }}

{{/*
Global system default registry: renders "<registry>/" when
.Values.global.cattle.systemDefaultRegistry is set, else an empty string,
so it can be prefixed directly onto an image reference.
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,53 @@
---
# Deployment running the harvester-cloud-provider binary on the host network.
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-cloud-provider.labels" . | nindent 4 }}
  name: {{ include "harvester-cloud-provider.name" . }}
spec:
  # Fixed: added the missing space before "}}" for consistency with every
  # other template expression in this chart (was "{{ .Values.replicasCount}}").
  replicas: {{ .Values.replicasCount }}
  selector:
    matchLabels:
      {{- include "harvester-cloud-provider.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "harvester-cloud-provider.selectorLabels" . | nindent 8 }}
    spec:
      serviceAccountName: {{ include "harvester-cloud-provider.name" . }}
      hostNetwork: true
      containers:
        - name: {{ include "harvester-cloud-provider.name" . }}
          image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - --cloud-config=/etc/kubernetes/cloud-config
            {{- if ne .Values.global.cattle.clusterName "" }}
            - --cluster-name={{ .Values.global.cattle.clusterName }}
            {{- end }}
          command:
            - harvester-cloud-provider
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            # The cloud config is mounted from the host path configured below.
            - mountPath: /etc/kubernetes/cloud-config
              name: cloud-config
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: cloud-config
          hostPath:
            # Fail the render early if no cloud config path was provided.
            path: {{ required "A valid cloudConfigPath is required!" .Values.cloudConfigPath }}
            type: File

View File

@ -0,0 +1,37 @@
# ServiceAccount plus cluster-wide RBAC for the cloud provider controller.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "harvester-cloud-provider.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-cloud-provider.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "harvester-cloud-provider.name" . }}
rules:
  # Core objects the controller reads and updates.
  - apiGroups: [ "" ]
    resources: [ "services", "nodes", "events" ]
    verbs: [ "get", "watch", "list", "update", "create", "patch" ]
  # Status subresources are updated separately from the objects themselves.
  - apiGroups: [ "" ]
    resources: [ "nodes/status", "services/status" ]
    verbs: [ "update", "patch" ]
  # Leases — presumably for leader election; confirm against the binary's flags.
  - apiGroups: [ "coordination.k8s.io" ]
    resources: [ "leases" ]
    verbs: [ "get", "update", "create" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "harvester-cloud-provider.name" . }}
  labels:
    {{- include "harvester-cloud-provider.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "harvester-cloud-provider.name" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "harvester-cloud-provider.name" . }}
    namespace: {{ .Release.Namespace }}

View File

@ -0,0 +1,71 @@
# Default values for harvester-cloud-provider.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicasCount: 1
image:
  repository: rancher/harvester-cloud-provider
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: v0.1.5
# Host path mounted into the pod at /etc/kubernetes/cloud-config.
cloudConfigPath: "/etc/kubernetes/cloud-config"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
nodeSelector:
  kubernetes.io/os: linux
# Tolerations allowing scheduling onto uninitialized, control-plane, and etcd nodes.
tolerations:
  - effect: NoSchedule
    key: node.cloudprovider.kubernetes.io/uninitialized
    operator: Equal
    value: "true"
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
    operator: Equal
  - effect: NoExecute
    key: node-role.kubernetes.io/etcd
    operator: Equal
  - key: cattle.io/os
    operator: Equal
    value: "linux"
    effect: NoSchedule
# Spread replicas across nodes: no two cloud-provider pods on the same host.
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            - key: app.kubernetes.io/name
              operator: In
              values:
                - harvester-cloud-provider
        topologyKey: kubernetes.io/hostname
global:
  cattle:
    systemDefaultRegistry: ""
    clusterName: ""

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# macOS Finder metadata
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,22 @@
# Chart metadata for the harvester-csi-driver chart.
annotations:
  # Rancher catalog annotations: certification, display name, install
  # namespace, and the Kubernetes / Rancher version ranges this release targets.
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Harvester CSI Driver
  catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
  catalog.cattle.io/namespace: kube-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux
  catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
  catalog.cattle.io/release-name: harvester-csi-driver
  catalog.cattle.io/ui-component: harvester-csi-driver
  # Version of the upstream chart this Rancher chart is based on.
  catalog.cattle.io/upstream-version: 0.1.16
apiVersion: v2
appVersion: v0.1.5
description: A Helm chart for Harvester CSI driver
keywords:
  - infrastructure
  - harvester
maintainers:
  - name: harvester
name: harvester-csi-driver
type: application
# Rancher versioning scheme: <rancher-chart-version>+up<upstream-chart-version>.
version: 103.0.0+up0.1.16

View File

@ -0,0 +1,11 @@
# Rancher UI metadata: catalog categories, install namespace, and the
# questions shown on the chart-install form.
categories:
  - infrastructure
  - harvester
namespace: kube-system
questions:
  - variable: cloudConfig.hostPath
    label: Cloud config file path
    description: "Specify the path of the cloud config."
    group: "Default"
    type: string
    default: "/etc/kubernetes/cloud-config"

View File

@ -0,0 +1 @@
Successfully deployed Harvester CSI driver to the {{ .Release.Namespace }} namespace.

View File

@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}

{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set; truncated to 63 chars (DNS label limit).
*/}}
{{- define "harvester-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "harvester-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" (SemVer build metadata) is not a valid label character, so it is replaced by "_".
*/}}
{{- define "harvester-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to all objects rendered by this chart.
*/}}
{{- define "harvester-csi-driver.labels" -}}
helm.sh/chart: {{ include "harvester-csi-driver.chart" . }}
{{ include "harvester-csi-driver.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}

{{/*
Selector labels: the stable subset used for matchLabels.
*/}}
{{- define "harvester-csi-driver.selectorLabels" -}}
app.kubernetes.io/name: {{ include "harvester-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Global system default registry: renders "<registry>/" when
.Values.global.cattle.systemDefaultRegistry is set, else an empty string,
so it can be prefixed directly onto an image reference.
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,10 @@
# Registers the Harvester CSI driver with the cluster.
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: driver.harvesterhci.io
spec:
  # Volumes require an attach (VolumeAttachment) step before mount.
  attachRequired: true
  fsGroupPolicy: ReadWriteOnceWithFSType
  # Pass pod information to the driver on NodePublishVolume.
  podInfoOnMount: true
  volumeLifecycleModes:
    - Persistent

View File

@ -0,0 +1,149 @@
# Node plugin DaemonSet: runs the CSI driver plus the kubelet registrar
# sidecar on every (matching) node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "harvester-csi-driver.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      component: csi-driver
      {{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        component: csi-driver
        {{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
    spec:
      containers:
        # Sidecar: registers the driver's socket with the kubelet plugin watcher.
        - args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --kubelet-registration-path={{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io/csi.sock
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          image: {{ template "system_default_registry" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          lifecycle:
            preStop:
              exec:
                # Remove the registration socket and driver sockets on shutdown.
                # (Plain-scalar continuation: folds into one shell command line.)
                command:
                  - /bin/sh
                  - -c
                  - rm -rf /registration/driver.harvesterhci.io-reg.sock
                    /csi//*
          name: node-driver-registrar
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi/
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
        # The Harvester CSI driver container itself.
        - args:
            - --nodeid=$(NODE_ID)
            - --endpoint=$(CSI_ENDPOINT)
            # Kubeconfig comes from the cloud-config volume mounted at /var/lib/harvester.
            - --kubeconfig=/var/lib/harvester/cloud-provider-config
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          image: {{ template "system_default_registry" . }}{{ .Values.image.harvester.csiDriver.repository }}:{{ .Values.image.harvester.csiDriver.tag | default .Chart.AppVersion }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          lifecycle:
            preStop:
              exec:
                # Remove the driver sockets on shutdown.
                command:
                  - /bin/sh
                  - -c
                  - rm -f /csi//*
          name: harvester-csi-driver
          # Privileged with SYS_ADMIN: required for the host mount operations below.
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - SYS_ADMIN
            privileged: true
          volumeMounts:
            - name: cloud-config
              readOnly: true
              mountPath: /var/lib/harvester
            - name: kubernetes
              readOnly: true
              mountPath: /etc/kubernetes
            - mountPath: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
              mountPropagation: Bidirectional
              name: kubernetes-csi-dir
            - mountPath: /csi/
              name: socket-dir
            - mountPath: {{ .Values.kubeletRootDir }}/pods
              mountPropagation: Bidirectional
              name: pods-mount-dir
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /rootfs
              mountPropagation: Bidirectional
              name: host
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
      hostPID: true
      serviceAccountName: {{ include "harvester-csi-driver.name" . }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        # Cloud-provider kubeconfig: from a Secret when configured, else a host path.
        - name: cloud-config
          {{- if .Values.cloudConfig.secretName }}
          secret:
            secretName: {{ .Values.cloudConfig.secretName }}
          {{- else }}
          hostPath:
            path: {{ .Values.cloudConfig.hostPath }}
            type: DirectoryOrCreate
          {{- end }}
        - hostPath:
            path: /etc/kubernetes
            type: DirectoryOrCreate
          name: kubernetes
        - hostPath:
            path: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
          name: kubernetes-csi-dir
        # Must already exist: the kubelet's plugin registration directory.
        - hostPath:
            path: {{ .Values.kubeletRootDir }}/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: {{ .Values.kubeletRootDir }}/pods
            type: DirectoryOrCreate
          name: pods-mount-dir
        - hostPath:
            path: /dev
          name: host-dev
        - hostPath:
            path: /sys
          name: host-sys
        - hostPath:
            path: /
          name: host
        - hostPath:
            path: /lib/modules
          name: lib-modules

View File

@ -0,0 +1,95 @@
# Controller Deployment: the CSI sidecars (resizer, provisioner, attacher),
# each doing leader election in the release namespace.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "harvester-csi-driver.name" . }}-controllers
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicasCount }}
  selector:
    matchLabels:
      component: csi-controllers
      {{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        component: csi-controllers
        {{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
    spec:
      containers:
        # external-resizer: handles volume expansion.
        - args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --timeout=1m50s
            - --leader-election
            - --leader-election-namespace=$(POD_NAMESPACE)
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
          image: {{ template "system_default_registry" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          name: csi-resizer
          volumeMounts:
            - mountPath: /csi/
              name: socket-dir
        # external-provisioner: creates/deletes volumes; defaults new volumes to ext4.
        - args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --timeout=1m50s
            - --leader-election
            - --leader-election-namespace=$(POD_NAMESPACE)
            - --default-fstype=ext4
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
          image: {{ template "system_default_registry" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          name: csi-provisioner
          volumeMounts:
            - mountPath: /csi/
              name: socket-dir
        # external-attacher: handles ControllerPublish/Unpublish (attach/detach).
        - args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --timeout=1m50s
            - --leader-election
            - --leader-election-namespace=$(POD_NAMESPACE)
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
          image: {{ template "system_default_registry" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          name: csi-attacher
          volumeMounts:
            - mountPath: /csi/
              name: socket-dir
      serviceAccountName: {{ include "harvester-csi-driver.name" . }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        # Shared host directory holding the driver's unix socket.
        - hostPath:
            path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
            type: DirectoryOrCreate
          name: socket-dir

View File

@ -0,0 +1,75 @@
# ServiceAccount and cluster-wide RBAC shared by the node plugin and the
# CSI sidecar controllers.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "harvester-csi-driver.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-csi-driver.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "harvester-csi-driver.name" . }}
  labels:
    {{- include "harvester-csi-driver.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "harvester-csi-driver.name" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "harvester-csi-driver.name" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "harvester-csi-driver.name" . }}
  labels:
    {{- include "harvester-csi-driver.labels" . | nindent 4 }}
rules:
  # Leader-election leases used by the sidecar controllers.
  - apiGroups: [ "coordination.k8s.io" ]
    resources: [ "leases" ]
    verbs: [ "get", "watch", "list", "delete", "update", "create" ]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "csistoragecapacities" ]
    verbs: [ "get", "list", "watch", "create", "update", "patch", "delete" ]
  # Volume lifecycle managed by the provisioner/resizer.
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: [ "get", "list", "watch", "create","update", "patch", "delete" ]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "create","update", "patch", "delete" ]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "csinodes" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "list", "watch", "create", "update", "patch" ]
  - apiGroups: [ "" ]
    resources: [ "pods" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "apps" ]
    resources: [ "replicasets" ]
    verbs: [ "get" ]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  # Attach/detach handling by the external-attacher.
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "volumeattachments/status" ]
    verbs: [ "patch" ]

View File

@ -0,0 +1,10 @@
# Default StorageClass backed by the Harvester CSI driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: harvester
  annotations:
    # Marks this class as the cluster default.
    storageclass.kubernetes.io/is-default-class: "true"
allowVolumeExpansion: true
provisioner: driver.harvesterhci.io
reclaimPolicy: Delete
volumeBindingMode: Immediate

View File

@ -0,0 +1,54 @@
# Default values for harvester-csi-driver.
replicasCount: 3
image:
  harvester:
    csiDriver:
      repository: rancher/harvester-csi-driver
      # Overrides the image tag whose default is the chart appVersion.
      tag: "v0.1.5"
  # CSI sidecar images (Longhorn mirrors).
  csi:
    nodeDriverRegistrar:
      repository: rancher/mirrored-longhornio-csi-node-driver-registrar
      tag: v2.3.0
    resizer:
      repository: rancher/mirrored-longhornio-csi-resizer
      tag: v1.2.0
    provisioner:
      repository: rancher/mirrored-longhornio-csi-provisioner
      tag: v2.1.2
    attacher:
      repository: rancher/mirrored-longhornio-csi-attacher
      tag: v3.2.1
  pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
kubeletRootDir: /var/lib/kubelet
cloudConfig:
  # When secretName is set, the cloud config is mounted from that Secret;
  # otherwise hostPath is mounted from the node.
  secretName: ""
  hostPath: "/var/lib/rancher/rke2/etc/config-files/"
nodeSelector:
  kubernetes.io/os: linux
tolerations:
  - effect: NoSchedule
    key: kubevirt.io/drain
    operator: Exists
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
    operator: Equal
  - effect: NoExecute
    key: node-role.kubernetes.io/etcd
    operator: Equal
  - key: cattle.io/os
    operator: Equal
    value: "linux"
    effect: NoSchedule
global:
  cattle:
    systemDefaultRegistry: ""

View File

@ -0,0 +1,17 @@
# Chart metadata for the rancher-csp-adapter chart (hidden from the catalog UI).
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: Rancher CSP Adapter
  # Hidden: not listed in the catalog UI.
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
  catalog.cattle.io/namespace: cattle-csp-adapter-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux
  catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
  catalog.cattle.io/release-name: rancher-csp-adapter
  catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 3.0.0
description: Cloud Service Provider Marketplace Adapter for Rancher. Monitors Rancher
  entitlements against usage.
name: rancher-csp-adapter
# Rancher versioning scheme: <rancher-chart-version>+up<upstream-version>.
version: 103.0.0+up3.0.0

View File

@ -0,0 +1,57 @@
{{/* Common app label for all csp-adapter objects. */}}
{{- define "csp-adapter.labels" -}}
app: rancher-csp-adapter
{{- end }}

{{/* Name of the ConfigMap the adapter writes its output to. */}}
{{- define "csp-adapter.outputConfigMap" -}}
csp-config
{{- end }}

{{/* Name of the RancherUserNotification the adapter manages. */}}
{{- define "csp-adapter.outputNotification" -}}
csp-compliance
{{- end }}

{{/* Name of the Secret used as the adapter's cache. */}}
{{- define "csp-adapter.cacheSecret" -}}
csp-adapter-cache
{{- end }}

{{/* Rancher setting holding the server hostname. */}}
{{- define "csp-adapter.hostnameSetting" -}}
server-url
{{- end }}

{{/* Rancher setting holding the server version. */}}
{{- define "csp-adapter.versionSetting" -}}
server-version
{{- end }}

{{/*
Selected cloud service provider: "aws" when .Values.aws.enabled, else "".
*/}}
{{- define "csp-adapter.csp" -}}
{{- if .Values.aws -}}
{{- if .Values.aws.enabled -}}
aws
{{- end -}}
{{- else -}}
""
{{- end -}}
{{- end }}

{{/*
"true" when both aws.accountNumber and aws.roleName are set, else "false".
*/}}
{{- define "csp-adapter.awsValuesSet" -}}
{{- if .Values.aws -}}
{{- if and .Values.aws.accountNumber .Values.aws.roleName -}}
true
{{- else -}}
false
{{- end -}}
{{- else -}}
false
{{- end -}}
{{- end }}

{{/*
System default registry prefix. Falls back to the AWS Marketplace ECR
registry when the csp is "aws" and no registry override is configured.
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- if eq (include "csp-adapter.csp" .) "aws" -}}
{{- "709825985650.dkr.ecr.us-east-1.amazonaws.com/suse/" -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,48 @@
# Deployment for the csp-adapter; target names for its outputs are passed
# via environment variables from the helper templates.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Chart.Name }}
  namespace: cattle-csp-adapter-system
spec:
  selector:
    matchLabels:
      app: {{ .Chart.Name }}
  template:
    metadata:
      labels:
        app: {{ .Chart.Name }}
    spec:
      containers:
        - env:
            - name: CATTLE_DEBUG
              value: {{ .Values.debug | quote }}
            - name: CATTLE_DEV_MODE
              value: {{ .Values.devMode | quote }}
            - name: K8S_OUTPUT_CONFIGMAP
              value: '{{ template "csp-adapter.outputConfigMap" }}'
            - name: K8S_OUTPUT_NOTIFICATION
              value: '{{ template "csp-adapter.outputNotification" }}'
            - name: K8S_CACHE_SECRET
              value: '{{ template "csp-adapter.cacheSecret" }}'
            - name: K8S_HOSTNAME_SETTING
              value: '{{ template "csp-adapter.hostnameSetting" }}'
            - name: K8S_RANCHER_VERSION_SETTING
              value: '{{ template "csp-adapter.versionSetting" }}'
          image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
          name: {{ .Chart.Name }}
          imagePullPolicy: "{{ .Values.image.imagePullPolicy }}"
          # Additional CA bundle mounted over the default cert path when provided.
          {{- if .Values.additionalTrustedCAs }}
          volumeMounts:
            - mountPath: /etc/ssl/certs/rancher-cert.pem
              name: tls-ca-volume
              subPath: ca-additional.pem
              readOnly: true
          {{- end }}
      serviceAccountName: {{ .Chart.Name }}
      {{- if .Values.additionalTrustedCAs }}
      volumes:
        - name: tls-ca-volume
          secret:
            defaultMode: 0444
            secretName: tls-ca-additional
      {{- end }}

View File

@ -0,0 +1,114 @@
# RBAC for the csp-adapter: cluster-wide read access plus write access
# scoped (via resourceNames) to the objects the adapter owns.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Chart.Name }}-cluster-role
rules:
  - apiGroups:
      - management.cattle.io
    resources:
      - ranchermetrics
    verbs:
      - get
  # Full control over the adapter's own notification object only.
  - apiGroups:
      - management.cattle.io
    resources:
      - rancherusernotifications
    resourceNames:
      - {{ template "csp-adapter.outputNotification" }}
    verbs:
      - "*"
  # Create cannot be restricted by resourceNames, hence a separate rule.
  - apiGroups:
      - management.cattle.io
    resources:
      - rancherusernotifications
    verbs:
      - create
  # Read-only access to the hostname and version settings.
  - apiGroups:
      - management.cattle.io
    resources:
      - settings
    resourceNames:
      - {{ template "csp-adapter.hostnameSetting" }}
      - {{ template "csp-adapter.versionSetting" }}
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apiregistration.k8s.io
    resources:
      - apiservices
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Chart.Name }}-crb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Chart.Name }}-cluster-role
subjects:
  - kind: ServiceAccount
    name: {{ .Chart.Name }}
    namespace: cattle-csp-adapter-system
---
# Namespaced role: the adapter's cache Secret and output ConfigMap.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Chart.Name }}-role
  namespace: cattle-csp-adapter-system
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    resourceNames:
      - {{ template "csp-adapter.cacheSecret" }}
    verbs:
      - "*"
  # Create cannot be restricted by resourceNames, hence a separate rule.
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      - {{ template "csp-adapter.outputConfigMap" }}
    verbs:
      - "*"
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ .Chart.Name }}-binding
  namespace: cattle-csp-adapter-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ .Chart.Name }}-role
subjects:
  - kind: ServiceAccount
    name: {{ .Chart.Name }}
    namespace: cattle-csp-adapter-system

View File

@ -0,0 +1,17 @@
{{/*
Validate configuration before rendering: exactly one CSP must be enabled,
and for AWS both account number and role name must be provided.
*/}}
{{- if eq (include "csp-adapter.csp" . ) "aws" -}}
{{- if eq (include "csp-adapter.awsValuesSet" .) "true" -}}
{{- else -}}
{{- fail "If the aws provider is enabled, account number and role name must be provided" -}}
{{- end -}}
{{- else -}}
{{- fail "One cloud provider must be enabled" -}}
{{- end -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Chart.Name }}
  namespace: cattle-csp-adapter-system
  {{- if eq (include "csp-adapter.csp" . ) "aws" }}
  # IRSA annotation: lets the pod assume the configured AWS IAM role.
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.aws.accountNumber }}:role/{{ .Values.aws.roleName }}
  {{- end }}

View File

@ -0,0 +1,24 @@
debug: false
# used for development only - not supported in production
devMode: false
image:
  repository: rancher/rancher-csp-adapter
  tag: v3.0.0
  imagePullPolicy: IfNotPresent
global:
  cattle:
    systemDefaultRegistry: ""
tolerations: []
# if rancher is using a privateCA, this certificate must be provided as a secret in the adapter's namespace - see the
# readme/docs for more details
#additionalTrustedCAs: true
# at least one csp must be enabled like below; templating fails the install otherwise
aws:
  enabled: false
  # Both accountNumber and roleName are required when enabled is true.
  accountNumber: ""
  roleName: ""

View File

@ -0,0 +1,10 @@
# Chart metadata for the CRD companion chart of rancher-logging.
annotations:
  catalog.cattle.io/certified: rancher
  # Hidden: installed automatically alongside rancher-logging, not listed in the UI.
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: cattle-logging-system
  catalog.cattle.io/release-name: rancher-logging-crd
apiVersion: v1
description: Installs the CRDs for rancher-logging.
name: rancher-logging-crd
type: application
version: 103.0.0+up3.17.10

View File

@ -0,0 +1,2 @@
# rancher-logging-crd
A Rancher chart that installs the CRDs used by rancher-logging.

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,25 @@
# Chart metadata for rancher-logging (packaged Banzai Cloud Logging operator).
annotations:
  # Automatically installs the matching CRD chart version first.
  catalog.cattle.io/auto-install: rancher-logging-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/deploys-on-os: windows
  catalog.cattle.io/display-name: Logging
  catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
  catalog.cattle.io/namespace: cattle-logging-system
  catalog.cattle.io/permits-os: linux,windows
  catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
  catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
  catalog.cattle.io/release-name: rancher-logging
  catalog.cattle.io/type: cluster-tool
  catalog.cattle.io/ui-component: logging
  # Version of the upstream logging-operator chart this release is based on.
  catalog.cattle.io/upstream-version: 3.17.10
apiVersion: v1
appVersion: 3.17.10
description: Collects and filter logs using highly configurable CRDs. Powered by Banzai
  Cloud Logging Operator.
icon: https://charts.rancher.io/assets/logos/logging.svg
keywords:
  - logging
  - monitoring
  - security
name: rancher-logging
# Rancher versioning scheme: <rancher-chart-version>+up<upstream-chart-version>.
version: 103.0.0+up3.17.10

View File

@ -0,0 +1,132 @@
# Logging operator Chart
[Logging operator](https://github.com/banzaicloud/logging-operator) Managed centralized logging component fluentd and fluent-bit instance on cluster.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
$ helm install banzaicloud-stable/logging-operator
```
## Introduction
This chart bootstraps a [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.8+ with Beta APIs enabled
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install --name my-release banzaicloud-stable/logging-operator
```
### CRDs
Use `createCustomResource=false` with Helm v3 to avoid trying to create CRDs from the `crds` folder and from templates at the same time.
The command deploys **Logging operator** on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete my-release
```
The command removes all Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the logging-operator chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ |-----------------------------------------------------------------------|
| `image.repository` | Container image repository | `ghcr.io/banzaicloud/logging-operator` |
| `image.tag` | Container image tag | `3.17.10` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `nameOverride` | Override name of app | `` |
| `fullnameOverride` | Override full name of app | `` |
| `namespaceOverride` | Override namespace of app | `` |
| `watchNamespace` | Namespace to watch for LoggingOperator CRD | `` |
| `rbac.enabled` | Create rbac service account and roles | `true` |
| `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` |
| `priorityClassName` | Operator priorityClassName | `{}` |
| `affinity` | Node Affinity | `{}` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `tolerations` | Node Tolerations | `[]` |
| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
| `podLabels` | Define custom labels for logging-operator pods | `{}` |
| `annotations` | Define annotations for logging-operator pods | `{}` |
| `podSecurityContext` | Pod SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` |
| `securityContext` | Container SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"allowPrivilegeEscalation": false, "readOnlyRootFilesystem": true}` |
| `createCustomResource` | Create CRDs. | `true` |
| `monitoring.serviceMonitor.enabled` | Create Prometheus Operator servicemonitor. | `false` |
| `serviceAccount.annotations` | Define annotations for logging-operator ServiceAccount | `{}` |
| `global.seLinux.enabled` | Add seLinuxOptions to Logging resources, requires the [rke2-selinux RPM](https://github.com/rancher/rke2-selinux/releases) | `false` |
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:
```bash
$ helm install --name my-release -f values.yaml banzaicloud-stable/logging-operator
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Installing Fluentd and Fluent-bit via logging
The previous chart does **not** install `logging` resource to deploy Fluentd and Fluent-bit on cluster. To install them please use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
$ helm install banzaicloud-stable/logging-operator-logging
```
## Configuration
The following table lists the configurable parameters of the logging-operator-logging chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ |------------------------------------------------------------|
| `tls.enabled` | Enable TLS communication between components | true |
| `tls.fluentdSecretName` | Name of an existing secret containing the TLS certs | This will overwrite automatic Helm certificate generation. |
| `tls.fluentbitSecretName` | Name of an existing secret containing the TLS certs | This will overwrite automatic Helm certificate generation. |
| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
| `fluentbit.enabled` | Install fluent-bit | true |
| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
| `fluentbit.image.tag` | Fluentbit container image tag | `1.9.5` |
| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
| `fluentd.enabled` | Install fluentd | true |
| `fluentd.image.tag` | Fluentd container image tag | `v1.14.6-alpine-5` |
| `fluentd.image.repository` | Fluentd container image repository | `ghcr.io/banzaicloud/fluentd` |
| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
| `fluentd.volumeModImage.repository` | Fluentd volumeModImage container image repository | `busybox` |
| `fluentd.volumeModImage.pullPolicy` | Fluentd volumeModImage container pull policy | `IfNotPresent` |
| `fluentd.configReloaderImage.tag` | Fluentd configReloaderImage container image tag | `v0.2.2` |
| `fluentd.configReloaderImage.repository` | Fluentd configReloaderImage container image repository | `jimmidyson/configmap-reload` |
| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
| `fluentd.fluentdPvcSpec.storageClassName` | Fluentd persistence volume storageclass | `""` |

View File

@ -0,0 +1,45 @@
# Rancher Logging
This chart is based off of the upstream [Banzai Logging Operator](https://banzaicloud.com/docs/one-eye/logging-operator/) chart. The chart deploys a logging operator and CRDs, which allows users to configure complex logging pipelines with a few simple custom resources. There are two levels of logging, which allow you to collect all logs in a cluster or from a single namespace.
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/logging/v2.7/).
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous field that was associated with any PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
## Namespace-level logging
To collect logs from a single namespace, users create flows and these flows are connected to outputs or cluster outputs.
## Cluster-level logging
To collect logs from an entire cluster, users create cluster flows and cluster outputs.
## CRDs
- [Cluster Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusterflow_types/) - A cluster flow is a CRD (`ClusterFlow`) that defines what logs to collect from the entire cluster. The cluster flow must be deployed in the same namespace as the logging operator.
- [Cluster Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusteroutput_types/) - A cluster output is a CRD (`ClusterOutput`) that defines how to connect to logging providers so they can start collecting logs. The cluster output must be deployed in the same namespace as the logging operator. The convenience of using a cluster output is that either a cluster flow or flow can send logs to those providers without needing to define specific outputs in each namespace for each flow.
- [Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/flow_types/) - A flow is a CRD (`Flow`) that defines what logs to collect from the namespace that it is deployed in.
- [Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/output_types/) - An output is a CRD (`Output`) that defines how to connect to logging providers so logs can be sent to the provider.
For more information on how to configure the Helm chart, refer to the Helm README.
## Systemd Configuration
Some Kubernetes distributions log to journald. In order to collect these logs the `systemdLogPath` needs to be defined. While the `/run/log/journal` directory is used by default, some Linux distributions do not default to this path. For example Ubuntu defaults to `/var/log/journal`. To determine your `systemdLogPath` run `cat /etc/systemd/journald.conf | grep -E ^\#?Storage | cut -d"=" -f2` on one of your nodes. If `persistent` is returned your `systemdLogPath` should be `/var/log/journal`. If `volatile` is returned `systemdLogPath` should be `/run/log/journal`. If `auto` is returned check if `/var/log/journal` exists, and if it does then use `/var/log/journal`, otherwise use `/run/log/journal`.
If any value not described here is returned, Rancher Logging will not be able to collect control plane logs. To address this issue set `Storage=volatile` in journald.conf, reboot your machine, and set `systemdLogPath` to `/run/log/journal`.

View File

@ -0,0 +1,121 @@
{{- define "logging-operator.logging.tpl" -}}
{{- /*
Base (default) Logging custom resource. Source-specific overlays (aks/eks/gke/…)
and the user-supplied .Values.loggingOverlay are deep-merged on top of this by
"logging-operator.util.merge.logging".
NOTE(review): indentation reconstructed during review — verify against the
shipped chart file.
*/ -}}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
  controlNamespace: {{ .Release.Namespace }}
  fluentbit:
    image:
      repository: {{ template "logging-operator.fluentbitImageRepository" . }}
      tag: {{ template "logging-operator.fluentbitImageTag" . }}
    {{- /* Buffer storage only makes sense when PVCs are enabled */}}
    {{- if not .Values.disablePvc }}
    {{- with .Values.fluentbit.bufferStorage }}
    bufferStorage: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.fluentbit.bufferStorageVolume }}
    bufferStorageVolume: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- end }}
    {{- /* The `security` key is emitted once if either PSP or SELinux settings are needed */}}
    {{- if or .Values.global.cattle.psp.enabled .Values.global.seLinux.enabled }}
    security:
    {{- end }}
    {{- if .Values.global.cattle.psp.enabled }}
      podSecurityPolicyCreate: true
      roleBasedAccessControlCreate: true
    {{- end }}
    {{- if .Values.global.seLinux.enabled }}
      securityContext:
        seLinuxOptions:
          type: rke_logreader_t
    {{- end }}
    {{- /* Render inputTail only when at least one tuning option is set */}}
    {{- if or .Values.fluentbit.inputTail.Buffer_Chunk_Size .Values.fluentbit.inputTail.Buffer_Max_Size .Values.fluentbit.inputTail.Mem_Buf_Limit .Values.fluentbit.inputTail.Multiline_Flush .Values.fluentbit.inputTail.Skip_Long_Lines }}
    inputTail:
      {{- if .Values.fluentbit.inputTail.Buffer_Chunk_Size }}
      Buffer_Chunk_Size: {{ .Values.fluentbit.inputTail.Buffer_Chunk_Size | toString }}
      {{- end }}
      {{- if .Values.fluentbit.inputTail.Buffer_Max_Size }}
      Buffer_Max_Size: {{ .Values.fluentbit.inputTail.Buffer_Max_Size | toString }}
      {{- end }}
      {{- if .Values.fluentbit.inputTail.Mem_Buf_Limit }}
      Mem_Buf_Limit: {{ .Values.fluentbit.inputTail.Mem_Buf_Limit | toString }}
      {{- end }}
      {{- if .Values.fluentbit.inputTail.Multiline_Flush }}
      Multiline_Flush: {{ .Values.fluentbit.inputTail.Multiline_Flush | toString | quote }}
      {{- end }}
      {{- if .Values.fluentbit.inputTail.Skip_Long_Lines }}
      Skip_Long_Lines: {{ .Values.fluentbit.inputTail.Skip_Long_Lines | toString | quote }}
      {{- end }}
    {{- end }}
    {{- /* fluent-bit gets the UNION of global and fluentbit-specific tolerations */}}
    {{- with (concat (.Values.tolerations) (.Values.fluentbit.tolerations)) }}
    tolerations: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.nodeSelector }}
    nodeSelector: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.fluentbit.resources }}
    resources: {{- toYaml . | nindent 6 }}
    {{- end }}
  fluentd:
    image:
      repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
      tag: {{ .Values.images.fluentd.tag }}
    configReloaderImage:
      repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
      tag: {{ .Values.images.config_reloader.tag }}
    {{- with .Values.fluentd.bufferStorageVolume }}
    bufferStorageVolume: {{- toYaml . | nindent 6 }}
    {{- end }}
    disablePvc: {{ .Values.disablePvc }}
    {{- if .Values.fluentd.replicas }}
    scaling:
      replicas: {{ .Values.fluentd.replicas }}
    {{- end }}
    security:
      podSecurityContext:
        runAsUser: 100
      {{- if .Values.global.cattle.psp.enabled }}
      podSecurityPolicyCreate: true
      roleBasedAccessControlCreate: true
      {{- end }}
    {{- with .Values.fluentd.env }}
    envVars: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- /* Unlike fluent-bit, fluentd FALLS BACK to the global values when its own are unset */}}
    {{- with (default .Values.tolerations .Values.fluentd.tolerations) }}
    tolerations: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with (default .Values.nodeSelector .Values.fluentd.nodeSelector) }}
    nodeSelector: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.fluentd.resources }}
    resources: {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.fluentd.livenessProbe }}
    livenessProbe: {{- toYaml . | nindent 6 }}
    {{- end }}
{{- end -}}
{{- define "logging-operator.util.merge.logging" -}}
{{- /*
Deep-merge the final Logging resource from three sources. Expects a list:
  [0] the top context (exposes .Release / .Values),
  [1] the name of the source-specific template,
  [2] the name of the generic/base template.
Sprig's `merge` gives precedence to earlier arguments, so the effective
precedence is: source-specific tpl > .Values.loggingOverlay > generic base.
*/ -}}
{{/* Top context to expose fields like `.Release` and `.Values` */}}
{{- $top := first . -}}
{{/* tpl is the template specific to the logging implementation */}}
{{- $tpl := fromYaml (include (index . 1) $top) | default (dict ) -}}
{{/* Generic is the shared rancher logging settings from `_generic_logging.yaml` */}}
{{- $generic := fromYaml (include (index . 2) $top) | default (dict ) -}}
{{/* values are from the values.yaml */}}
{{- $values := $top.Values.loggingOverlay | default (dict ) -}}
{{/* sources are merged with tpl taking the highest precedence, then values, then generic */}}
{{- toYaml (merge $tpl $values $generic) -}}
{{- end -}}
{{- define "logging-operator.logging" -}}
{{- /*
Convenience wrapper: appends the base template's name to the caller's argument
list and delegates to the merge helper. Call as:
  include "logging-operator.logging" (list . "<source-specific template name>")
*/ -}}
{{- include "logging-operator.util.merge.logging" (append . "logging-operator.logging.tpl") -}}
{{- end -}}

View File

@ -0,0 +1,179 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "logging-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "logging-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Provides the namespace the chart will be installed in using the builtin .Release.Namespace,
or, if provided, a manually overwritten namespace value.
*/}}
{{- define "logging-operator.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{ .Values.namespaceOverride -}}
{{- else -}}
{{ .Release.Namespace }}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" is not a valid character in label values, hence the replacement.
*/}}
{{- define "logging-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "logging-operator.labels" -}}
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
helm.sh/chart: {{ include "logging-operator.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Image registry prefix: .Values.global.cattle.systemDefaultRegistry followed by
a trailing slash, or the empty string when no registry override is set.
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Renders "true" only when .Values.global.cattle.windows.enabled is set and
truthy; the kindIs "invalid" checks guard against the keys being absent.
*/}}
{{- define "windowsEnabled" }}
{{- if not (kindIs "invalid" .Values.global.cattle.windows) }}
{{- if not (kindIs "invalid" .Values.global.cattle.windows.enabled) }}
{{- if .Values.global.cattle.windows.enabled }}
true
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Normalized Windows path prefix (default "c:\"): backslashes converted to
forward slashes, doubled slashes collapsed, drive letter upper-cased,
trailing slash trimmed.
*/}}
{{- define "windowsPathPrefix" -}}
{{- trimSuffix "/" (default "c:\\" .Values.global.cattle.rkeWindowsPathPrefix | replace "\\" "/" | replace "//" "/" | replace "c:" "C:") -}}
{{- end -}}
{{/*
Fluent-bit tag for Windows kubernetes logs, derived from the path prefix with
":" dropped and "/" turned into ".".
*/}}
{{- define "windowsKubernetesFilter" -}}
{{- printf "kubernetes.%s" ((include "windowsPathPrefix" .) | replace ":" "" | replace "/" ".") -}}
{{- end -}}
{{/*
Host mount path for the Windows input tail: the path prefix without the "C:" drive.
*/}}
{{- define "windowsInputTailMount" -}}
{{- (include "windowsPathPrefix" .) | replace "C:" "" -}}
{{- end -}}
{{/*
Set the controlplane selector based on kubernetes distribution
*/}}
{{- define "controlplaneSelector" -}}
{{- /* rke2/k3s label control-plane nodes with "master"; rke uses "controlplane".
       .Values.additionalLoggingSources.kubeAudit.nodeSelector overrides either default. */}}
{{- $master := or .Values.additionalLoggingSources.rke2.enabled .Values.additionalLoggingSources.k3s.enabled -}}
{{- $defaultSelector := $master | ternary (dict "node-role.kubernetes.io/master" "true") (dict "node-role.kubernetes.io/controlplane" "true") -}}
{{ default $defaultSelector .Values.additionalLoggingSources.kubeAudit.nodeSelector | toYaml }}
{{- end -}}
{{/*
Set kube-audit file path prefix based on distribution.
For distributions other than rke/rke2 the user MUST supply
additionalLoggingSources.kubeAudit.pathPrefix (template fails otherwise).
*/}}
{{- define "kubeAuditPathPrefix" -}}
{{- if .Values.additionalLoggingSources.rke.enabled -}}
{{ default "/var/log/kube-audit" .Values.additionalLoggingSources.kubeAudit.pathPrefix }}
{{- else if .Values.additionalLoggingSources.rke2.enabled -}}
{{ default "/var/lib/rancher/rke2/server/logs" .Values.additionalLoggingSources.kubeAudit.pathPrefix }}
{{- else -}}
{{ required "Directory PathPrefix of the kube-audit location is required" .Values.additionalLoggingSources.kubeAudit.pathPrefix }}
{{- end -}}
{{- end -}}
{{/*
Set kube-audit file name based on distribution.
For distributions other than rke/rke2 the user MUST supply
additionalLoggingSources.kubeAudit.auditFilename (template fails otherwise).
*/}}
{{- define "kubeAuditFilename" -}}
{{- if .Values.additionalLoggingSources.rke.enabled -}}
{{ default "audit-log.json" .Values.additionalLoggingSources.kubeAudit.auditFilename }}
{{- else if .Values.additionalLoggingSources.rke2.enabled -}}
{{ default "audit.log" .Values.additionalLoggingSources.kubeAudit.auditFilename }}
{{- else -}}
{{ required "Filename of the kube-audit log is required" .Values.additionalLoggingSources.kubeAudit.auditFilename }}
{{- end -}}
{{- end -}}
{{/*
A shared list of custom parsers for the various fluentbit pods rancher creates
*/}}
{{- define "logging-operator.parsers" -}}
[PARSER]
    Name klog
    Format regex
    Regex ^(?<level>[IWEF])(?<timestamp>\d{4} \d{2}:\d{2}:\d{2}).\d{6} +?(?<thread_id>\d+) (?<filename>.+):(?<linenumber>\d+)] (?<message>.+)
    Time_Key timestamp
    Time_Format %m%d %T
[PARSER]
    Name rancher
    Format regex
    Regex ^time="(?<timestamp>.+)" level=(?<level>.+) msg="(?<msg>.+)"$
    Time_Key timestamp
    Time_Format %FT%H:%M:%S
[PARSER]
    Name etcd
    Format json
    Time_Key timestamp
    Time_Format %FT%H:%M:%S.%L
{{- end -}}
{{/*
Set kubernetes log options if they are configured.
Renders "true" when any kubernetes-filter tuning option is set.
*/}}
{{- define "requireFilterKubernetes" -}}
{{- if or .Values.fluentbit.filterKubernetes.Merge_Log .Values.fluentbit.filterKubernetes.Merge_Log_Key .Values.fluentbit.filterKubernetes.Merge_Trim .Values.fluentbit.filterKubernetes.Merge_Parser -}}
true
{{- end -}}
{{- end -}}
{{/*Fluent Bit Image Repository — the debug image is selected when .Values.debug is set */}}
{{- define "logging-operator.fluentbitImageRepository" -}}
{{- if .Values.debug -}}
{{ template "system_default_registry" . }}{{ .Values.images.fluentbit_debug.repository }}
{{- else -}}
{{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
{{- end -}}
{{- end -}}
{{/*Fluent Bit Image Tag — debug tag when .Values.debug is set */}}
{{- define "logging-operator.fluentbitImageTag" -}}
{{- if .Values.debug -}}
{{ .Values.images.fluentbit_debug.tag }}
{{- else -}}
{{ .Values.images.fluentbit.tag }}
{{- end -}}
{{- end -}}
{{/*Fluent Bit Image — full "repository:tag" reference */}}
{{- define "logging-operator.fluentbitImage" -}}
{{ template "logging-operator.fluentbitImageRepository" . }}:{{ template "logging-operator.fluentbitImageTag" . }}
{{- end -}}

View File

@ -0,0 +1,318 @@
{{- if .Values.rbac.enabled }}
---
# ClusterRole granting the logging operator permission to manage logging
# pipelines: fluentd/fluent-bit workloads, the Banzai Cloud logging CRDs,
# supporting RBAC/PSP objects, and Prometheus monitoring resources.
# NOTE(review): rule list mirrors the upstream generated role; some entries
# overlap (configmaps and serviceaccounts each appear in two rules) — redundant
# but harmless, and left as-is since the role is generated upstream.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: {{ template "logging-operator.fullname" . }}
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - endpoints
  - namespaces
  - nodes
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  - pods
  - serviceaccounts
  - services
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  - events.k8s.io
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - daemonsets
  - replicasets
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - daemonsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - '*'
- apiGroups:
  - events.k8s.io
  resources:
  - events
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - extensions
  - policy
  resources:
  - podsecuritypolicies
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - use
  - watch
- apiGroups:
  - logging-extensions.banzaicloud.io
  resources:
  - eventtailers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - logging-extensions.banzaicloud.io
  resources:
  - eventtailers/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - logging-extensions.banzaicloud.io
  resources:
  - hosttailers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - logging-extensions.banzaicloud.io
  resources:
  - hosttailers/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - logging.banzaicloud.io
  resources:
  - clusterflows
  - clusteroutputs
  - flows
  - loggings
  - outputs
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - logging.banzaicloud.io
  resources:
  - clusterflows/status
  - clusteroutputs/status
  - flows/status
  - loggings/status
  - outputs/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - monitoring.coreos.com
  resources:
  - prometheusrules
  - servicemonitors
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  - clusterroles
  - rolebindings
  - roles
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterroles
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.rbac.enabled }}
---
# Binds the operator's ServiceAccount to the ClusterRole of the same name.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "logging-operator.fullname" . }}
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
subjects:
  - kind: ServiceAccount
    name: {{ template "logging-operator.fullname" . }}
    namespace: {{ include "logging-operator.namespace" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "logging-operator.fullname" . }}
{{- end }}

View File

@ -0,0 +1,6 @@
{{- if .Values.createCustomResource -}}
{{- /*
Render every CRD manifest bundled under crds/ as part of the release.
Per the chart README, set createCustomResource=false with Helm v3 to avoid
creating the CRDs from the crds/ folder and from templates at the same time.
*/}}
{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }}
{{ $.Files.Get $path }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,77 @@
# Deployment running the logging-operator controller.
# NOTE(review): indentation reconstructed during review (the nindent 8/12
# values indicate pod-level vs container-level placement) — verify against
# the shipped chart file.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "logging-operator.fullname" . }}
  namespace: {{ include "logging-operator.namespace" . }}
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "logging-operator.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "logging-operator.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
        {{- with .Values.podLabels }}
        {{ toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.annotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          args:
            {{- range .Values.extraArgs }}
            - {{ . }}
            {{- end }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          ports:
            - name: http
              containerPort: {{ .Values.http.port }}
          {{- with .Values.env }}
          env: {{ toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.securityContext }}
          # container-level securityContext (nindent 12)
          securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
          {{- end }}
          {{- with .Values.volumeMounts }}
          volumeMounts: {{ toYaml . | nindent 12 }}
          {{- end }}
      # pod-level fields below (nindent 8)
      {{- with .Values.volumes }}
      volumes: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.podSecurityContext }}
      securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.rbac.enabled }}
      serviceAccountName: {{ include "logging-operator.fullname" . }}
      {{- end }}

View File

@ -0,0 +1,18 @@
{{- define "logging-operator.logging.aks" -}}
{{- /*
AKS overlay for the Logging resource: tails the host's kubelet status log and
tags records "aks". Deep-merged over the base template by
"logging-operator.logging".
*/ -}}
{{- $logPath := "/var/log/azure/kubelet-status.log" -}}
metadata:
  name: {{ .Release.Name }}-aks
spec:
  fluentbit:
    disableKubernetesFilter: true
    extraVolumeMounts:
      - source: {{ $logPath }}
        destination: {{ $logPath }}
        readOnly: true
    inputTail:
      Tag: "aks"
      Path: {{ $logPath }}
{{- end -}}
{{- if .Values.additionalLoggingSources.aks.enabled }}
{{- include "logging-operator.logging" (list . "logging-operator.logging.aks") -}}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- define "logging-operator.logging.eks" -}}
{{- /*
EKS overlay for the Logging resource: tails /var/log/messages from the host
with the syslog parser and tags records "eks". Deep-merged over the base
template by "logging-operator.logging".
*/ -}}
{{- $logPath := "/var/log/messages" -}}
metadata:
  name: {{ .Release.Name }}-eks
spec:
  fluentbit:
    disableKubernetesFilter: true
    extraVolumeMounts:
      - source: {{ $logPath }}
        destination: {{ $logPath }}
        readOnly: true
    inputTail:
      Tag: "eks"
      Path: {{ $logPath }}
      Parser: "syslog"
{{- end -}}
{{- if .Values.additionalLoggingSources.eks.enabled }}
{{- include "logging-operator.logging" (list . "logging-operator.logging.eks") -}}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- define "logging-operator.logging.gke" -}}
{{- /*
GKE overlay for the Logging resource: tails the host's kube-proxy log and tags
records "gke". Deep-merged over the base template by "logging-operator.logging".
*/ -}}
{{- $logPath := "/var/log/kube-proxy.log" -}}
metadata:
  name: {{ .Release.Name }}-gke
spec:
  fluentbit:
    disableKubernetesFilter: true
    extraVolumeMounts:
      - source: {{ $logPath }}
        destination: {{ $logPath }}
        readOnly: true
    inputTail:
      Tag: "gke"
      Path: {{ $logPath }}
{{- end -}}
{{- if .Values.additionalLoggingSources.gke.enabled }}
{{- include "logging-operator.logging" (list . "logging-operator.logging.gke") -}}
{{- end }}

View File

@ -0,0 +1,57 @@
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "systemd") }}
# Fluent-bit configuration for the k3s journald aggregator DaemonSet: tails the
# k3s and k3s-agent systemd units, applies the shared klog/rancher/etcd parsers
# from "logging-operator.parsers", and forwards records to the release's root
# fluentd service on port 24240.
# NOTE(review): the Strip_Underscores conditional sits between the two
# Systemd_Filter lines; fluent-bit [INPUT] keys are order-insensitive, so this
# is presumed harmless — confirm against the rendered config.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-k3s
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
  fluent-bit.conf: |
    [SERVICE]
        Flush 1
        Grace 5
        Daemon Off
        Log_Level info
        Coro_Stack_Size 24576
        Parsers_File parsers.conf
    [INPUT]
        Name systemd
        Tag k3s
        Path {{ .Values.systemdLogPath }}
        Systemd_Filter _SYSTEMD_UNIT=k3s.service
        {{- if .Values.additionalLoggingSources.k3s.stripUnderscores }}
        Strip_Underscores On
        {{- end }}
        Systemd_Filter _SYSTEMD_UNIT=k3s-agent.service
    [FILTER]
        Name parser
        Match *
        Key_Name MESSAGE
        Parser klog
        Reserve_Data On
    [FILTER]
        Name parser
        Match *
        Key_Name MESSAGE
        Parser rancher
        Reserve_Data On
    [FILTER]
        Name parser
        Match *
        Key_Name MESSAGE
        Parser etcd
        Reserve_Data On
    [OUTPUT]
        Name forward
        Match *
        Host {{ .Release.Name }}-root-fluentd.{{ .Release.Namespace }}.svc
        Port 24240
        Retry_Limit False
  parsers.conf: |
{{ include "logging-operator.parsers" . | indent 4 }}
{{- end }}

View File

@ -0,0 +1,110 @@
{{/*
DaemonSet that runs fluent-bit on every node to ship k3s journald logs using
the <release>-k3s ConfigMap, together with its ServiceAccount and — when
global.cattle.psp.enabled — the PodSecurityPolicy plus Role/RoleBinding that
authorize "use" of it. The checksum/config pod annotation forces a rollout
whenever the rendered ConfigMap changes. The journal and /etc/machine-id host
paths are mounted read-only; the rke_logreader_t SELinux type is applied only
when global.seLinux.enabled.
*/}}
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "systemd") }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-k3s-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-k3s-journald-aggregator
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/loggings/k3s/configmap.yaml") . | sha256sum }}
name: "{{ .Release.Name }}-k3s-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-k3s-journald-aggregator
spec:
containers:
- name: fluentbit
image: "{{ template "logging-operator.fluentbitImage" . }}"
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
volumeMounts:
- mountPath: /fluent-bit/etc/
name: config
- mountPath: {{ .Values.systemdLogPath | default "/var/log/journal" }}
name: journal
readOnly: true
- mountPath: /etc/machine-id
name: machine-id
readOnly: true
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: "{{ .Release.Name }}-k3s-journald-aggregator"
volumes:
- name: config
configMap:
name: "{{ .Release.Name }}-k3s"
- name: journal
hostPath:
path: {{ .Values.systemdLogPath | default "/var/log/journal" }}
- name: machine-id
hostPath:
path: /etc/machine-id
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ .Release.Name }}-k3s-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
{{- if .Values.global.cattle.psp.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ .Release.Name }}-k3s-journald-aggregator"
rules:
- apiGroups:
- policy
resourceNames:
- "{{ .Release.Name }}-k3s-journald-aggregator"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ .Release.Name }}-k3s-journald-aggregator"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ .Release.Name }}-k3s-journald-aggregator"
subjects:
- kind: ServiceAccount
name: "{{ .Release.Name }}-k3s-journald-aggregator"
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: "{{ .Release.Name }}-k3s-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
readOnlyRootFilesystem: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- secret
- hostPath
{{- end }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{/*
Logging CR overlay for k3s with the openrc container engine: tails
/var/log/k3s.log on the host (mounted read-only at the same path), recording
the file name under "filename" via Path_Key. Rendered through the shared
"logging-operator.logging" helper; emitted only when k3s logging is enabled
and container_engine is "openrc" (the systemd case uses the journald
aggregator instead).
*/}}
{{- define "logging-operator.logging.k3s-openrc" -}}
{{- $logPath := "/var/log/k3s.log" -}}
metadata:
name: {{ .Release.Name }}-k3s
spec:
fluentbit:
disableKubernetesFilter: true
extraVolumeMounts:
- source: {{ $logPath }}
destination: {{ $logPath }}
readOnly: true
inputTail:
Tag: "k3s"
Path: {{ $logPath }}
Path_Key: filename
{{- end -}}
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "openrc")}}
{{- include "logging-operator.logging" (list . "logging-operator.logging.k3s-openrc") -}}
{{- end }}

View File

@ -0,0 +1,25 @@
{{/*
Logging CR overlay for the Kubernetes audit log: mounts the host directory
resolved by the "kubeAuditPathPrefix" helper at /kube-audit-logs (read-only),
tails the file named by the "kubeAuditFilename" helper, and parses each line
as JSON. Pods are pinned to control-plane nodes via "controlplaneSelector"
and tolerate the union of the chart-level, fluentbit-level and
kubeAudit-specific tolerations. An optional loggingRef scopes the CR to a
specific Logging resource.
*/}}
{{- define "logging-operator.logging.kube-audit" -}}
metadata:
name: {{ .Release.Name }}-kube-audit
spec:
{{- if .Values.additionalLoggingSources.kubeAudit.loggingRef }}
loggingRef: {{ .Values.additionalLoggingSources.kubeAudit.loggingRef }}
{{- end }}
fluentbit:
disableKubernetesFilter: true
extraVolumeMounts:
- source: {{ template "kubeAuditPathPrefix" . }}
destination: "/kube-audit-logs"
readOnly: true
inputTail:
Tag: {{ .Values.additionalLoggingSources.kubeAudit.fluentbit.logTag }}
Path: /kube-audit-logs/{{ template "kubeAuditFilename" . }}
Parser: json
{{- with (concat (.Values.tolerations) (.Values.fluentbit.tolerations) (.Values.additionalLoggingSources.kubeAudit.fluentbit.tolerations)) }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
nodeSelector: {{ include "controlplaneSelector" . | nindent 6 }}
{{- end -}}
{{- if .Values.additionalLoggingSources.kubeAudit.enabled }}
{{- include "logging-operator.logging" (list . "logging-operator.logging.kube-audit") -}}
{{- end }}

View File

@ -0,0 +1,29 @@
{{/*
ConfigMap holding a standalone fluent-bit configuration for RKE nodes: tails
/var/lib/rancher/rke/log/*.log with the docker parser (position DB kept under
/tail-db) and forwards to the root fluentd service on port 24240. Log level
and memory buffer limit come from additionalLoggingSources.rke.fluentbit.
Mounted by the <release>-rke-aggregator DaemonSet.
*/}}
{{- if .Values.additionalLoggingSources.rke.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-rke
labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
fluent-bit.conf: |
[SERVICE]
Log_Level {{ .Values.additionalLoggingSources.rke.fluentbit.log_level }}
Parsers_File parsers.conf
[INPUT]
Tag rke
Name tail
Path_Key filename
Parser docker
DB /tail-db/tail-containers-state.db
Mem_Buf_Limit {{ .Values.additionalLoggingSources.rke.fluentbit.mem_buffer_limit }}
Path /var/lib/rancher/rke/log/*.log
[OUTPUT]
Name forward
Match *
Host {{ .Release.Name }}-root-fluentd.{{ .Release.Namespace }}.svc
Port 24240
Retry_Limit False
{{- end }}

View File

@ -0,0 +1,122 @@
{{/*
DaemonSet that runs fluent-bit on every node to ship RKE control-plane logs
from /var/lib/rancher/rke/log/ using the <release>-rke ConfigMap, together
with its ServiceAccount and — when global.cattle.psp.enabled — the
PodSecurityPolicy plus Role/RoleBinding that authorize "use" of it. The
container log directory is derived from global.dockerRootDirectory (default
/var/lib/docker) with "/containers/" appended. The tail position DB lives in
an emptyDir, so positions reset when a pod is recreated.
NOTE(review): unlike the k3s/rke2 journald aggregators this pod template has
no checksum/config annotation, so ConfigMap changes do not automatically roll
the pods — confirm whether that is intentional.
*/}}
{{- if .Values.additionalLoggingSources.rke.enabled }}
{{- $containers := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-rke-aggregator
template:
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-rke-aggregator
spec:
containers:
- name: fluentbit
image: "{{ template "logging-operator.fluentbitImage" . }}"
volumeMounts:
- mountPath: /var/lib/rancher/rke/log/
name: indir
- mountPath: {{ $containers }}
name: containers
- mountPath: /tail-db
name: positiondb
- mountPath: /fluent-bit/etc/fluent-bit.conf
name: config
subPath: fluent-bit.conf
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
volumes:
- name: indir
hostPath:
path: /var/lib/rancher/rke/log/
type: DirectoryOrCreate
- name: containers
hostPath:
path: {{ $containers }}
type: DirectoryOrCreate
- name: positiondb
emptyDir: {}
- name: config
configMap:
name: "{{ .Release.Name }}-rke"
serviceAccountName: "{{ .Release.Name }}-rke-aggregator"
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 8 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
{{- if .Values.global.cattle.psp.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
rules:
- apiGroups:
- policy
resourceNames:
- "{{ .Release.Name }}-rke-aggregator"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ .Release.Name }}-rke-aggregator"
subjects:
- kind: ServiceAccount
name: "{{ .Release.Name }}-rke-aggregator"
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
allowPrivilegeEscalation: false
allowedHostPaths:
- pathPrefix: {{ $containers }}
readOnly: false
- pathPrefix: /var/lib/rancher/rke/log/
readOnly: false
- pathPrefix: /var/lib/rancher/logging/
readOnly: false
fsGroup:
rule: RunAsAny
readOnlyRootFilesystem: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- secret
- hostPath
{{- end }}
{{- end }}

View File

@ -0,0 +1,69 @@
{{/*
ConfigMap holding a standalone fluent-bit configuration for RKE2 nodes: reads
the rke2-server.service / rke2-agent.service journald units from
.Values.systemdLogPath, additionally tails the kubelet log file under
/var/lib/rancher/rke2/agent/logs/, runs the klog/rancher/etcd parsers (klog
on both the tail "log" key and the journald MESSAGE key), and forwards
everything to the root fluentd service on port 24240. Mounted by the
<release>-rke2-journald-aggregator DaemonSet.
*/}}
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-rke2
labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
fluent-bit.conf: |
[SERVICE]
Flush 1
Grace 5
Daemon Off
Log_Level info
Coro_Stack_Size 24576
Parsers_File parsers.conf
[INPUT]
Name systemd
Tag rke2
Path {{ .Values.systemdLogPath }}
Systemd_Filter _SYSTEMD_UNIT=rke2-server.service
Systemd_Filter _SYSTEMD_UNIT=rke2-agent.service
{{- if .Values.additionalLoggingSources.rke2.stripUnderscores }}
Strip_Underscores On
{{- end }}
[INPUT]
Name tail
Tag rke2
Path /var/lib/rancher/rke2/agent/logs/kubelet.log
[FILTER]
Name parser
Match *
Key_Name log
Parser klog
Reserve_Data On
[FILTER]
Name parser
Match *
Key_Name MESSAGE
Parser klog
Reserve_Data On
[FILTER]
Name parser
Match *
Key_Name MESSAGE
Parser rancher
Reserve_Data On
[FILTER]
Name parser
Match *
Key_Name MESSAGE
Parser etcd
Reserve_Data On
[OUTPUT]
Name forward
Match *
Host {{ .Release.Name }}-root-fluentd.{{ .Release.Namespace }}.svc
Port 24240
Retry_Limit False
parsers.conf: |
{{ include "logging-operator.parsers" . | indent 4 }}
{{- end }}

View File

@ -0,0 +1,116 @@
{{/*
DaemonSet that runs fluent-bit on every node to ship RKE2 journald and kubelet
logs using the <release>-rke2 ConfigMap, together with its ServiceAccount and
— when global.cattle.psp.enabled — the PodSecurityPolicy plus Role/RoleBinding
that authorize "use" of it. The checksum/config pod annotation forces a
rollout whenever the rendered ConfigMap changes. The journald directory,
the kubelet.log file and /etc/machine-id are mounted read-only from the host;
the rke_logreader_t SELinux type is applied only when global.seLinux.enabled.
*/}}
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-rke2-journald-aggregator
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/loggings/rke2/configmap.yaml") . | sha256sum }}
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-rke2-journald-aggregator
spec:
containers:
- name: fluentbit
image: "{{ template "logging-operator.fluentbitImage" . }}"
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
volumeMounts:
- mountPath: /fluent-bit/etc/
name: config
- mountPath: {{ .Values.systemdLogPath | default "/var/log/journal" }}
name: journal
readOnly: true
- mountPath: "/var/lib/rancher/rke2/agent/logs/kubelet.log"
name: kubelet
readOnly: true
- mountPath: /etc/machine-id
name: machine-id
readOnly: true
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: "{{ .Release.Name }}-rke2-journald-aggregator"
volumes:
- name: config
configMap:
name: "{{ .Release.Name }}-rke2"
- name: journal
hostPath:
path: {{ .Values.systemdLogPath | default "/var/log/journal" }}
- name: kubelet
hostPath:
path: "/var/lib/rancher/rke2/agent/logs/kubelet.log"
- name: machine-id
hostPath:
path: /etc/machine-id
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
{{- if .Values.global.cattle.psp.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
rules:
- apiGroups:
- policy
resourceNames:
- "{{ .Release.Name }}-rke2-journald-aggregator"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ .Release.Name }}-rke2-journald-aggregator"
subjects:
- kind: ServiceAccount
name: "{{ .Release.Name }}-rke2-journald-aggregator"
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
readOnlyRootFilesystem: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- secret
- hostPath
{{- end }}
{{- end }}

View File

@ -0,0 +1,82 @@
{{/*
Root Logging CR: the cluster-wide fluentbit configuration, rendered through
the shared "logging-operator.logging" helper and ALWAYS emitted (the include
at the bottom is unconditional). It covers:
 - optional Windows node agents (gated on the "windowsEnabled" helper): a
   plain "win-agent" and, when RKE logging is enabled, a "win-agent-rke" that
   tails /var/lib/rancher/rke/log through the windows path-prefix helpers and
   passes through the fluentbit.inputTail buffer/flush options;
 - a custom container log mount when global.dockerRootDirectory is overridden
   (default /var/lib/docker, with "/containers/" appended);
 - pass-through of the fluentbit.filterKubernetes Merge_* options when the
   "requireFilterKubernetes" helper says a filterKubernetes stanza is needed.
*/}}
{{- define "logging-operator.logging.root" -}}
{{- $containerLogPath := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }}
metadata:
name: "{{ .Release.Name }}-root"
spec:
{{- if (include "windowsEnabled" .) }}
nodeAgents:
- name: win-agent
profile: windows
nodeAgentFluentbit:
daemonSet:
spec:
template:
spec:
containers:
- image: {{ template "system_default_registry" . }}{{ .Values.images.nodeagent_fluentbit.repository }}:{{ .Values.images.nodeagent_fluentbit.tag }}
name: fluent-bit
tls:
enabled: {{ .Values.nodeAgents.tls.enabled | default false }}
{{- if .Values.additionalLoggingSources.rke.enabled }}
- name: win-agent-rke
profile: windows
nodeAgentFluentbit:
filterKubernetes:
Kube_Tag_Prefix: "{{ template "windowsKubernetesFilter" . }}.var.lib.rancher.rke.log."
inputTail:
Path: "{{ template "windowsPathPrefix" . }}/var/lib/rancher/rke/log"
{{- if .Values.fluentbit.inputTail.Buffer_Chunk_Size }}
Buffer_Chunk_Size: {{ .Values.fluentbit.inputTail.Buffer_Chunk_Size | toString }}
{{- end }}
{{- if .Values.fluentbit.inputTail.Buffer_Max_Size }}
Buffer_Max_Size: {{ .Values.fluentbit.inputTail.Buffer_Max_Size | toString }}
{{- end }}
{{- if .Values.fluentbit.inputTail.Mem_Buf_Limit }}
Mem_Buf_Limit: {{ .Values.fluentbit.inputTail.Mem_Buf_Limit | toString }}
{{- end }}
{{- if .Values.fluentbit.inputTail.Multiline_Flush }}
Multiline_Flush: {{ .Values.fluentbit.inputTail.Multiline_Flush | toString | quote }}
{{- end }}
{{- if .Values.fluentbit.inputTail.Skip_Long_Lines }}
Skip_Long_Lines: {{ .Values.fluentbit.inputTail.Skip_Long_Lines | toString | quote }}
{{- end }}
extraVolumeMounts:
- source: "{{ template "windowsInputTailMount" . }}/var/lib/rancher/rke/log"
destination: "{{ template "windowsInputTailMount" . }}/var/lib/rancher/rke/log"
readOnly: true
daemonSet:
spec:
template:
spec:
containers:
- image: "{{ template "system_default_registry" . }}{{ .Values.images.nodeagent_fluentbit.repository }}:{{ .Values.images.nodeagent_fluentbit.tag }}"
name: fluent-bit
tls:
enabled: {{ .Values.nodeAgents.tls.enabled | default false }}
{{- end }}
{{- end }}
fluentbit:
{{- if .Values.global.dockerRootDirectory }}
mountPath: {{ $containerLogPath }}
extraVolumeMounts:
- source: {{ $containerLogPath }}
destination: {{ $containerLogPath }}
readOnly: true
{{- end }}
{{- if (include "requireFilterKubernetes" .) }}
filterKubernetes:
{{- if .Values.fluentbit.filterKubernetes.Merge_Log }}
Merge_Log: "{{ .Values.fluentbit.filterKubernetes.Merge_Log }}"
{{- end }}
{{- if .Values.fluentbit.filterKubernetes.Merge_Log_Key }}
Merge_Log_Key: "{{ .Values.fluentbit.filterKubernetes.Merge_Log_Key }}"
{{- end }}
{{- if .Values.fluentbit.filterKubernetes.Merge_Log_Trim }}
Merge_Log_Trim: "{{ .Values.fluentbit.filterKubernetes.Merge_Log_Trim }}"
{{- end }}
{{- if .Values.fluentbit.filterKubernetes.Merge_Parser }}
Merge_Parser: "{{ .Values.fluentbit.filterKubernetes.Merge_Parser }}"
{{- end }}
{{- end }}
{{- end -}}
{{- include "logging-operator.logging" (list . "logging-operator.logging.root") -}}

View File

@ -0,0 +1,34 @@
{{/*
PodSecurityPolicy for the logging operator, created only when PSPs are
requested via global.cattle.psp.enabled (the policy/v1beta1 API was removed
in Kubernetes 1.25; see the validate-install-psp guard). Annotations are
taken from rbac.psp.annotations (e.g. seccomp profile restrictions).
Fix: dropped the redundant single-argument "and" from the guard — "and" with
one operand just returns that operand.
*/}}
{{ if .Values.global.cattle.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.logging-operator
  namespace: {{ include "logging-operator.namespace" . }}
  annotations:
{{- if .Values.rbac.psp.annotations }}
{{ toYaml .Values.rbac.psp.annotations | indent 4 }}
{{- end }}
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
  readOnlyRootFilesystem: true
  privileged: false
  allowPrivilegeEscalation: false
  runAsUser:
    rule: MustRunAsNonRoot
  fsGroup:
    rule: MustRunAs
    ranges:
    - min: 1
      max: 65535
  supplementalGroups:
    rule: MustRunAs
    ranges:
    - min: 1
      max: 65535
  seLinux:
    rule: RunAsAny
  volumes:
  - secret
  - configMap
{{ end }}

View File

@ -0,0 +1,20 @@
# ClusterIP Service exposing the logging operator's HTTP endpoint: port
# .Values.http.port forwarded to the container's named "http" port. With the
# chart default http.service.clusterIP=None this renders as a headless
# service; pods are selected by the chart's standard name/instance labels.
apiVersion: v1
kind: Service
metadata:
name: {{ include "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
type: ClusterIP
{{- with .Values.http.service.clusterIP }}
clusterIP: {{ . }}
{{- end }}
ports:
- port: {{ .Values.http.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,30 @@
{{/*
Prometheus Operator ServiceMonitor scraping the logging operator's /metrics
endpoint (named port "http"); created only when
monitoring.serviceMonitor.enabled is set.
Fixes: read .Values.monitoring.serviceMonitor.metricRelabelings — the key
declared in values.yaml — instead of the nonexistent "metricsRelabelings"
(previously the user's setting was silently ignored), and indent the
"relabelings" entries with the same nindent as "metricRelabelings".
*/}}
{{ if .Values.monitoring.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "logging-operator.fullname" . }}
  namespace: {{ include "logging-operator.namespace" . }}
  labels:
{{ include "logging-operator.labels" . | indent 4 }}
{{- with .Values.monitoring.serviceMonitor.additionalLabels }}
  {{- toYaml . | nindent 4 }}
{{- end }}
spec:
  selector:
    matchLabels:
{{ include "logging-operator.labels" . | indent 6 }}
  endpoints:
  - port: http
    path: /metrics
    {{- with .Values.monitoring.serviceMonitor.metricRelabelings }}
    metricRelabelings:
    {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.monitoring.serviceMonitor.relabelings }}
    relabelings:
    {{- toYaml . | nindent 6 }}
    {{- end }}
  namespaceSelector:
    matchNames:
    - {{ include "logging-operator.namespace" . }}
{{- end }}

View File

@ -0,0 +1,14 @@
{{/*
ServiceAccount for the logging operator, created only when rbac.enabled;
optional annotations come from serviceAccount.annotations (e.g. for IAM
role bindings).
*/}}
{{- if .Values.rbac.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
# Aggregated ClusterRoles for logging CRDs:
# - "logging-admin" grants full control ("*") over flows and outputs and
#   aggregates into the built-in admin role;
# - "logging-view" grants read-only access (get/list/watch) to flows,
#   outputs, clusterflows and clusteroutputs and aggregates into the built-in
#   admin, view and edit roles (admins inherit the read-only set too).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-admin"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-view"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
- clusterflows
- clusteroutputs
verbs:
- get
- list
- watch

View File

@ -0,0 +1,20 @@
# Pre-install guard, deliberately written inside YAML comments so the file
# itself renders to no manifest: the template actions still execute, and the
# "required" call aborts the release when any of the listed logging CRD API
# versions is missing from .Capabilities.APIVersions. The outer lookup check
# skips the whole guard when the cluster cannot be introspected (e.g. during
# "helm template").
# NOTE(review): the checks look for v1alpha1 API versions while the chart's
# provides-gvr annotation advertises logging.banzaicloud.io/v1beta1 — verify
# the CRD chart actually serves v1alpha1, otherwise this guard may fail even
# when the CRDs are installed.
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "logging-extensions.banzaicloud.io/v1alpha1/EventTailer" false -}}
# {{- set $found "logging-extensions.banzaicloud.io/v1alpha1/HostTailer" false -}}
# {{- set $found "logging.banzaicloud.io/v1alpha1/ClusterFlow" false -}}
# {{- set $found "logging.banzaicloud.io/v1alpha1/ClusterOutput" false -}}
# {{- set $found "logging.banzaicloud.io/v1alpha1/Flow" false -}}
# {{- set $found "logging.banzaicloud.io/v1alpha1/Logging" false -}}
# {{- set $found "logging.banzaicloud.io/v1alpha1/Output" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,5 @@
# Pre-install guard (comment-wrapped like the other validate templates):
# rejects a global.dockerRootDirectory ending in "/" or "/containers", since
# the chart templates append "/containers/" to it themselves.
#{{- if .Values.global.dockerRootDirectory }}
#{{- if or (hasSuffix "/containers" .Values.global.dockerRootDirectory) (hasSuffix "/" .Values.global.dockerRootDirectory) }}
#{{- required "global.dockerRootDirectory must not end with suffix: '/' or '/containers'" "" -}}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,7 @@
# Pre-install guard (comment-wrapped like the other validate templates): when
# PSPs are requested via global.cattle.psp.enabled but the target cluster no
# longer serves policy/v1beta1/PodSecurityPolicy, fail fast with a clear
# message instead of producing un-applyable manifests. Skipped when the
# cluster cannot be introspected (the lookup returns no ClusterRoles).
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,240 @@
# Default values for logging-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: rancher/mirrored-banzaicloud-logging-operator
tag: 3.17.10
pullPolicy: IfNotPresent
env: []
volumes: []
volumeMounts: []
extraArgs:
- -enable-leader-election=true
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
## Pod custom labels
##
podLabels: {}
annotations: {}
## Deploy CRDs used by Logging Operator.
##
createCustomResource: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
affinity: {}
http:
# http listen port number
port: 8080
# Service definition for query http service
service:
type: ClusterIP
clusterIP: None
# Annotations to query http service
annotations: {}
# Labels to query http service
labels: {}
rbac:
enabled: true
psp:
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
## SecurityContext holds pod-level security attributes and common container settings.
## This defaults to non-root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
podSecurityContext: {}
# runAsNonRoot: true
# runAsUser: 1000
# fsGroup: 2000
securityContext: {}
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# capabilities:
# drop: ["ALL"]
## Operator priorityClassName
##
# NOTE(review): priorityClassName is a string in pod specs; the {} default
# only works as a falsy placeholder — confirm the templates guard on it
# before rendering it into a manifest.
priorityClassName: {}
monitoring:
# Create a Prometheus Operator ServiceMonitor object
serviceMonitor:
enabled: false
additionalLabels: {}
metricRelabelings: []
relabelings: []
serviceAccount:
annotations: {}
###################################
# Rancher Logging Operator Values #
###################################
# Enable debug to use fluent-bit images that allow exec
debug: false
# Disable persistent volumes for buffers
disablePvc: true
# If your additional logging sources collect logs from systemd configure the systemd log path here
systemdLogPath: "/run/log/journal"
global:
cattle:
systemDefaultRegistry: ""
# Uncomment the below two lines to either enable or disable Windows logging. If this chart is
# installed via the Rancher UI, it will set this value to "true" if the cluster is a Windows
# cluster. In that scenario, if you would like to disable Windows logging on Windows clusters,
# set the value below to "false".
# windows:
# enabled: true
psp:
enabled: false
# Change the "dockerRootDirectory" if the default Docker directory has changed.
dockerRootDirectory: ""
rkeWindowsPathPrefix: "c:\\"
seLinux:
enabled: false
images:
config_reloader:
repository: rancher/mirrored-jimmidyson-configmap-reload
tag: v0.4.0
fluentbit:
repository: rancher/mirrored-fluent-fluent-bit
tag: 1.9.5
nodeagent_fluentbit:
os: "windows"
repository: rancher/fluent-bit
tag: 1.8.9
fluentbit_debug:
repository: rancher/mirrored-fluent-fluent-bit
tag: 1.9.5-debug
fluentd:
repository: rancher/mirrored-banzaicloud-fluentd
tag: v1.14.6-alpine-5
additionalLoggingSources:
rke:
enabled: false
fluentbit:
log_level: "info"
mem_buffer_limit: "5MB"
rke2:
enabled: false
stripUnderscores: false
k3s:
enabled: false
container_engine: "systemd"
stripUnderscores: false
aks:
enabled: false
eks:
enabled: false
gke:
enabled: false
kubeAudit:
auditFilename: ""
enabled: false
pathPrefix: ""
fluentbit:
logTag: kube-audit
tolerations:
- key: node-role.kubernetes.io/controlplane
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/etcd
value: "true"
effect: NoExecute
# configures node agent options for windows node agents
nodeAgents:
tls:
enabled: false
# These settings apply to every Logging CR, including vendor Logging CRs enabled in "additionalLoggingSources".
# Changing these affects every Logging CR installed.
fluentd:
bufferStorageVolume: {}
livenessProbe:
tcpSocket:
port: 24240
initialDelaySeconds: 30
periodSeconds: 15
nodeSelector: {}
resources: {}
# NOTE(review): tolerations is a list in pod specs; {} here relies on being
# falsy — [] would be the conventional empty value.
tolerations: {}
env: []
fluentbit:
inputTail:
Buffer_Chunk_Size: ""
Buffer_Max_Size: ""
Mem_Buf_Limit: ""
Multiline_Flush: ""
Skip_Long_Lines: ""
resources: {}
tolerations:
- key: node-role.kubernetes.io/controlplane
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/etcd
value: "true"
effect: NoExecute
filterKubernetes:
Merge_Log: ""
Merge_Log_Key: ""
Merge_Log_Trim: ""
Merge_Parser: ""
# DO NOT SET THIS UNLESS YOU KNOW WHAT YOU ARE DOING.
# Setting fields on this object can break rancher logging or cause unexpected behavior. It is intended to be used if you
# need to configure functionality not exposed by rancher logging. It is highly recommended you check the `app-readme.md`
# for the functionality you need before modifying this object.
# this object will be merged with every logging CR created by this chart. Any fields that collide with fields from the
# settings above will be overridden. Any fields that collide with fields set in the files in `templates/loggings` will
# be ignored.
# NOTE(review): the key this trailing comment describes appears to be missing
# from this rendering of the file — confirm against the packaged chart.

View File

@ -1867,6 +1867,32 @@ entries:
- assets/fleet-crd/fleet-crd-0.3.000.tgz
version: 0.3.000
harvester-cloud-provider:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester Cloud Provider
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: harvester-cloud-provider
catalog.cattle.io/ui-component: harvester-cloud-provider
catalog.cattle.io/upstream-version: 0.1.14
apiVersion: v2
appVersion: v0.1.5
created: "2023-09-26T15:37:48.303455789+02:00"
description: A Helm chart for Harvester Cloud Provider
digest: f81655874efcb403ae1832babfb51f0ed3f8436191c54f19247bf9871d4436bf
keywords:
- infrastructure
- harvester
maintainers:
- name: harvester
name: harvester-cloud-provider
type: application
urls:
- assets/harvester-cloud-provider/harvester-cloud-provider-103.0.0+up0.1.14.tgz
version: 103.0.0+up0.1.14
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester Cloud Provider
@ -2075,6 +2101,32 @@ entries:
- assets/harvester-cloud-provider/harvester-cloud-provider-100.0.0+up0.1.8.tgz
version: 100.0.0+up0.1.8
harvester-csi-driver:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester CSI Driver
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: harvester-csi-driver
catalog.cattle.io/ui-component: harvester-csi-driver
catalog.cattle.io/upstream-version: 0.1.16
apiVersion: v2
appVersion: v0.1.5
created: "2023-09-26T11:33:56.118519701+08:00"
description: A Helm chart for Harvester CSI driver
digest: f1fa09b30e1a5a57a5a5630a4ea39cf780fd45fc6ccecddc737627a5e9d9435f
keywords:
- infrastructure
- harvester
maintainers:
- name: harvester
name: harvester-csi-driver
type: application
urls:
- assets/harvester-csi-driver/harvester-csi-driver-103.0.0+up0.1.16.tgz
version: 103.0.0+up0.1.16
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester CSI Driver
@ -7757,6 +7809,27 @@ entries:
- assets/rancher-cis-benchmark-crd/rancher-cis-benchmark-crd-1.0.100.tgz
version: 1.0.100
rancher-csp-adapter:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher CSP Adapter
catalog.cattle.io/hidden: "true"
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-csp-adapter-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-csp-adapter
catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 3.0.0
created: "2023-10-23T12:48:49.961330205-05:00"
description: Cloud Service Provider Marketplace Adapter for Rancher. Monitors
Rancher entitlements against usage.
digest: 263bf96e0e6ce951e84dc6007c2b4e78340e75981da1e6f78227bae350121ef8
name: rancher-csp-adapter
urls:
- assets/rancher-csp-adapter/rancher-csp-adapter-103.0.0+up3.0.0.tgz
version: 103.0.0+up3.0.0
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher CSP Adapter
@ -10961,6 +11034,35 @@ entries:
- assets/rancher-kube-state-metrics/rancher-kube-state-metrics-2.13.101.tgz
version: 2.13.101
rancher-logging:
- annotations:
catalog.cattle.io/auto-install: rancher-logging-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/deploys-on-os: windows
catalog.cattle.io/display-name: Logging
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-logging-system
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-logging
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: logging
catalog.cattle.io/upstream-version: 3.17.10
apiVersion: v1
appVersion: 3.17.10
created: "2023-09-01T14:06:06.266214607+12:00"
description: Collects and filter logs using highly configurable CRDs. Powered
by Banzai Cloud Logging Operator.
digest: 4579e2d8b4a325ed9e2981f80ccb89e4fe2056379bf8e7e64b4c165b18d481af
icon: https://charts.rancher.io/assets/logos/logging.svg
keywords:
- logging
- monitoring
- security
name: rancher-logging
urls:
- assets/rancher-logging/rancher-logging-103.0.0+up3.17.10.tgz
version: 103.0.0+up3.17.10
- annotations:
catalog.cattle.io/auto-install: rancher-logging-crd=match
catalog.cattle.io/certified: rancher
@ -11423,6 +11525,20 @@ entries:
- assets/rancher-logging/rancher-logging-3.6.000.tgz
version: 3.6.000
rancher-logging-crd:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-logging-system
catalog.cattle.io/release-name: rancher-logging-crd
apiVersion: v1
created: "2023-09-01T14:06:06.301913469+12:00"
description: Installs the CRDs for rancher-logging.
digest: 703903d8a06c01f35da538a281c95cb354af7f606886e48f33235f7740881587
name: rancher-logging-crd
type: application
urls:
- assets/rancher-logging-crd/rancher-logging-crd-103.0.0+up3.17.10.tgz
version: 103.0.0+up3.17.10
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"

View File

@ -5,12 +5,12 @@
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester Cloud Provider
- catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.26.0-0'
+ catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.27.0-0'
+ catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
- catalog.cattle.io/rancher-version: '>= 2.6.1-0 < 2.8.0-0'
+ catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
+ catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: harvester-cloud-provider
catalog.cattle.io/ui-component: harvester-cloud-provider
+ catalog.cattle.io/upstream-version: 0.1.14

View File

@ -1,2 +1,3 @@
url: https://github.com/harvester/charts/releases/download/harvester-cloud-provider-0.1.14/harvester-cloud-provider-0.1.14.tgz
version: 102.0.1
version: 103.0.0
doNotRelease: false

View File

@ -5,12 +5,12 @@
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester CSI Driver
- catalog.cattle.io/kube-version: '>= 1.18.0-0 < 1.26.0-0'
+ catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.27.0-0'
+ catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux
- catalog.cattle.io/rancher-version: '>= 2.6.1-0 < 2.8.0-0'
+ catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
+ catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: harvester-csi-driver
catalog.cattle.io/ui-component: harvester-csi-driver
+ catalog.cattle.io/upstream-version: 0.1.16

View File

@ -1,2 +1,3 @@
url: https://github.com/harvester/charts/releases/download/harvester-csi-driver-0.1.16/harvester-csi-driver-0.1.16.tgz
version: 102.0.1
version: 103.0.0
doNotRelease: false

View File

@ -1,2 +1,2 @@
url: https://github.com/rancher/csp-adapter/releases/download/v2.0.2/rancher-csp-adapter-2.0.2.tgz
version: 2.0.2
url: https://github.com/rancher/csp-adapter/releases/download/v3.0.0/rancher-csp-adapter-3.0.0.tgz
version: 103.0.0

View File

@ -6,11 +6,11 @@
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/deploys-on-os: windows
+ catalog.cattle.io/display-name: Logging
+ catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.27.0-0'
+ catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
+ catalog.cattle.io/namespace: cattle-logging-system
+ catalog.cattle.io/permits-os: linux,windows
+ catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
+ catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
+ catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
+ catalog.cattle.io/release-name: rancher-logging
+ catalog.cattle.io/type: cluster-tool
+ catalog.cattle.io/ui-component: logging

View File

@ -1,8 +1,9 @@
url: https://kubernetes-charts.banzaicloud.com/charts/logging-operator-3.17.10.tgz
version: 102.0.1
version: 103.0.0
additionalCharts:
- workingDir: charts-crd
crdOptions:
templateDirectory: crd-template
crdDirectory: templates
addCRDValidationToMainChart: true
doNotRelease: false

View File

@ -1510,6 +1510,7 @@ sync:
- v2.0.0
- v2.0.1
- v2.0.2
- v3.0.0
- source: docker.io/rancher/rancher-webhook
target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/rancher-webhook'
type: repository

View File

@ -1,30 +1,10 @@
epinio:
- 103.0.0+up1.8.1
- 103.0.1+up1.9.0
- 103.0.2+up1.10.0
- 102.0.4+up1.9.0
epinio-crd:
- 103.0.0+up1.8.1
- 103.0.1+up1.9.0
- 103.0.2+up1.10.0
- 102.0.4+up1.9.0
longhorn:
- 103.0.0+up1.3.3
- 103.1.0+up1.4.3
- 103.2.0+up1.5.1
longhorn-crd:
- 103.0.0+up1.3.3
- 103.1.0+up1.4.3
- 103.2.0+up1.5.1
neuvector:
- 102.0.4+up2.6.2
- 102.0.5+up2.6.4
- 103.0.0+up2.6.4
neuvector-crd:
- 102.0.4+up2.6.2
- 102.0.5+up2.6.4
- 103.0.0+up2.6.4
neuvector-monitor:
- 102.0.4+up2.6.2
- 102.0.5+up2.6.4
- 103.0.0+up2.6.4
harvester-csi-driver:
- 103.0.0+up0.1.16
harvester-cloud-provider:
- 103.0.0+up0.1.14
rancher-csp-adapter:
- 103.0.0+up3.0.0
rancher-logging:
- 103.0.0+up3.17.10
rancher-logging-crd:
- 103.0.0+up3.17.10