Merge pull request #1885 from rancher/dev-v2.6

[release-v2.6] Release charts for v2.6.5
pull/1908/head
Sheilagh Morlan 2022-05-11 18:21:05 -07:00 committed by GitHub
commit bfc397edc5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
545 changed files with 93181 additions and 251 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester Cloud Provider
catalog.cattle.io/kube-version: '>= 1.18'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.6.1-0 <= 2.6.99-0'
catalog.cattle.io/release-name: harvester-cloud-provider
catalog.cattle.io/ui-component: harvester-cloud-provider
catalog.cattle.io/upstream-version: 0.1.8
apiVersion: v2
appVersion: v0.1.3
description: A Helm chart for Harvester Cloud Provider
keywords:
- infrastructure
- harvester
maintainers:
- name: harvester
name: harvester-cloud-provider
type: application
version: 100.0.2+up0.1.12

@@ -0,0 +1,3 @@
replicasCount: 1
# This is an existing but invalid kubeconfig, used only for Helm installation testing in kind
cloudConfigPath: "/etc/kubernetes/admin.conf"

@@ -0,0 +1,11 @@
categories:
- infrastructure
- harvester
namespace: kube-system
questions:
- variable: cloudConfigPath
label: Cloud config file path
description: "Specify the path of the cloud config."
group: "Default"
type: string
default: "/etc/kubernetes/cloud-config"

@@ -0,0 +1,69 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "harvester-cloud-provider.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "harvester-cloud-provider.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "harvester-cloud-provider.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "harvester-cloud-provider.labels" -}}
helm.sh/chart: {{ include "harvester-cloud-provider.chart" . }}
{{ include "harvester-cloud-provider.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "harvester-cloud-provider.selectorLabels" -}}
app.kubernetes.io/name: {{ include "harvester-cloud-provider.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "harvester-cloud-provider.serviceAccountName" -}}
{{- default (include "harvester-cloud-provider.fullname" .) .Values.serviceAccount.name }}
{{- end }}
{{/*
Global system default registry
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,53 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-cloud-provider.labels" . | nindent 4 }}
name: {{ include "harvester-cloud-provider.name" . }}
spec:
replicas: {{ .Values.replicasCount }}
selector:
matchLabels:
{{- include "harvester-cloud-provider.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "harvester-cloud-provider.selectorLabels" . | nindent 8 }}
spec:
serviceAccountName: {{ include "harvester-cloud-provider.name" . }}
hostNetwork: true
containers:
- name: {{ include "harvester-cloud-provider.name" . }}
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --cloud-config=/etc/kubernetes/cloud-config
{{- if ne .Values.global.cattle.clusterName "" }}
- --cluster-name={{ .Values.global.cattle.clusterName }}
{{- end }}
command:
- harvester-cloud-provider
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/kubernetes/cloud-config
name: cloud-config
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: cloud-config
hostPath:
path: {{ required "A valid cloudConfigPath is required!" .Values.cloudConfigPath }}
type: File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "harvester-cloud-provider.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-cloud-provider.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "harvester-cloud-provider.name" . }}
rules:
- apiGroups: [ "" ]
resources: [ "services", "nodes", "events" ]
verbs: [ "get", "watch", "list", "update", "create", "patch" ]
- apiGroups: [ "" ]
resources: [ "nodes/status", "services/status" ]
verbs: [ "update", "patch" ]
- apiGroups: [ "coordination.k8s.io" ]
resources: [ "leases" ]
verbs: [ "get", "update", "create" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "harvester-cloud-provider.name" . }}
labels:
{{- include "harvester-cloud-provider.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "harvester-cloud-provider.name" . }}
subjects:
- kind: ServiceAccount
name: {{ include "harvester-cloud-provider.name" . }}
namespace: {{ .Release.Namespace }}

@@ -0,0 +1,71 @@
# Default values for harvester-cloud-provider.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicasCount: 1
image:
repository: rancher/harvester-cloud-provider
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: v0.1.3
cloudConfigPath: "/etc/kubernetes/cloud-config"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector:
kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Equal
value: "true"
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
- effect: NoExecute
key: node-role.kubernetes.io/etcd
operator: Equal
- key: cattle.io/os
operator: Equal
value: "linux"
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- harvester-cloud-provider
topologyKey: kubernetes.io/hostname
global:
cattle:
systemDefaultRegistry: ""
clusterName: ""

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Harvester CSI Driver
catalog.cattle.io/kube-version: '>= 1.18'
catalog.cattle.io/namespace: kube-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/rancher-version: '>= 2.6.1-0 <= 2.6.99-0'
catalog.cattle.io/release-name: harvester-csi-driver
catalog.cattle.io/ui-component: harvester-csi-driver
catalog.cattle.io/upstream-version: 0.1.9
apiVersion: v2
appVersion: v0.1.3
description: A Helm chart for Harvester CSI driver
keywords:
- infrastructure
- harvester
maintainers:
- name: harvester
name: harvester-csi-driver
type: application
version: 100.0.2+up0.1.11

@@ -0,0 +1,11 @@
categories:
- infrastructure
- harvester
namespace: kube-system
questions:
- variable: cloudConfig.hostPath
label: Cloud config file path
description: "Specify the path of the cloud config."
group: "Default"
type: string
default: "/etc/kubernetes/cloud-config"

@@ -0,0 +1 @@
Successfully deployed Harvester CSI driver to the {{ .Release.Namespace }} namespace.

@@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "harvester-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "harvester-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "harvester-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "harvester-csi-driver.labels" -}}
helm.sh/chart: {{ include "harvester-csi-driver.chart" . }}
{{ include "harvester-csi-driver.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "harvester-csi-driver.selectorLabels" -}}
app.kubernetes.io/name: {{ include "harvester-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Global system default registry
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: driver.harvesterhci.io
spec:
attachRequired: true
fsGroupPolicy: ReadWriteOnceWithFSType
podInfoOnMount: true
volumeLifecycleModes:
- Persistent

@@ -0,0 +1,149 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
component: csi-driver
{{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
component: csi-driver
{{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
spec:
containers:
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --kubelet-registration-path={{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io/csi.sock
env:
- name: ADDRESS
value: /csi/csi.sock
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -rf /registration/driver.harvesterhci.io-reg.sock /csi//*
name: node-driver-registrar
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi/
name: socket-dir
- mountPath: /registration
name: registration-dir
- args:
- --nodeid=$(NODE_ID)
- --endpoint=$(CSI_ENDPOINT)
- --kubeconfig=/var/lib/harvester/cloud-provider-config
env:
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
image: {{ template "system_default_registry" . }}{{ .Values.image.harvester.csiDriver.repository }}:{{ .Values.image.harvester.csiDriver.tag | default .Chart.AppVersion }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -f /csi//*
name: harvester-csi-driver
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- SYS_ADMIN
privileged: true
volumeMounts:
- name: cloud-config
readOnly: true
mountPath: /var/lib/harvester
- name: kubernetes
readOnly: true
mountPath: /etc/kubernetes
- mountPath: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
mountPropagation: Bidirectional
name: kubernetes-csi-dir
- mountPath: /csi/
name: socket-dir
- mountPath: {{ .Values.kubeletRootDir }}/pods
mountPropagation: Bidirectional
name: pods-mount-dir
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /rootfs
mountPropagation: Bidirectional
name: host
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostPID: true
serviceAccountName: {{ include "harvester-csi-driver.name" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: cloud-config
{{- if .Values.cloudConfig.secretName }}
secret:
secretName: {{ .Values.cloudConfig.secretName }}
{{- else }}
hostPath:
path: {{ .Values.cloudConfig.hostPath }}
type: DirectoryOrCreate
{{- end }}
- hostPath:
path: /etc/kubernetes
type: DirectoryOrCreate
name: kubernetes
- hostPath:
path: {{ .Values.kubeletRootDir }}/plugins/kubernetes.io/csi
type: DirectoryOrCreate
name: kubernetes-csi-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: {{ .Values.kubeletRootDir }}/pods
type: DirectoryOrCreate
name: pods-mount-dir
- hostPath:
path: /dev
name: host-dev
- hostPath:
path: /sys
name: host-sys
- hostPath:
path: /
name: host
- hostPath:
path: /lib/modules
name: lib-modules

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "harvester-csi-driver.name" . }}-controllers
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicasCount }}
selector:
matchLabels:
component: csi-controllers
{{- include "harvester-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
component: csi-controllers
{{- include "harvester-csi-driver.selectorLabels" . | nindent 8 }}
spec:
containers:
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-resizer
volumeMounts:
- mountPath: /csi/
name: socket-dir
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
- --default-fstype=ext4
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-provisioner
volumeMounts:
- mountPath: /csi/
name: socket-dir
- args:
- --v=5
- --csi-address=$(ADDRESS)
- --timeout=1m50s
- --leader-election
- --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: ADDRESS
value: /csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: {{ template "system_default_registry" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: csi-attacher
volumeMounts:
- mountPath: /csi/
name: socket-dir
serviceAccountName: {{ include "harvester-csi-driver.name" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- hostPath:
path: {{ .Values.kubeletRootDir }}/harvester-plugins/driver.harvesterhci.io
type: DirectoryOrCreate
name: socket-dir

@@ -0,0 +1,75 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "harvester-csi-driver.name" . }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "harvester-csi-driver.name" . }}
subjects:
- kind: ServiceAccount
name: {{ include "harvester-csi-driver.name" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "harvester-csi-driver.name" . }}
labels:
{{- include "harvester-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [ "coordination.k8s.io" ]
resources: [ "leases" ]
verbs: [ "get", "watch", "list", "delete", "update", "create" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csistoragecapacities" ]
verbs: [ "get", "list", "watch", "create", "update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: [ "get", "list", "watch", "create","update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create","update", "patch", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csinodes" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "list", "watch", "create", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "apps" ]
resources: [ "replicasets" ]
verbs: [ "get" ]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "volumeattachments/status" ]
verbs: [ "patch" ]

@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: harvester
annotations:
storageclass.kubernetes.io/is-default-class: "true"
allowVolumeExpansion: true
provisioner: driver.harvesterhci.io
reclaimPolicy: Delete
volumeBindingMode: Immediate

@@ -0,0 +1,54 @@
# Default values for harvester-csi-driver.
replicasCount: 3
image:
harvester:
csiDriver:
repository: rancher/harvester-csi-driver
# Overrides the image tag whose default is the chart appVersion.
tag: ""
csi:
nodeDriverRegistrar:
repository: rancher/mirrored-longhornio-csi-node-driver-registrar
tag: v2.3.0
resizer:
repository: rancher/mirrored-longhornio-csi-resizer
tag: v1.2.0
provisioner:
repository: rancher/mirrored-longhornio-csi-provisioner
tag: v2.1.2
attacher:
repository: rancher/mirrored-longhornio-csi-attacher
tag: v3.2.1
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
kubeletRootDir: /var/lib/kubelet
cloudConfig:
secretName: ""
hostPath: "/var/lib/rancher/rke2/etc/config-files/"
nodeSelector:
kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
key: kubevirt.io/drain
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
- effect: NoExecute
key: node-role.kubernetes.io/etcd
operator: Equal
- key: cattle.io/os
operator: Equal
value: "linux"
effect: NoSchedule
global:
cattle:
systemDefaultRegistry: ""

@@ -0,0 +1,16 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/release-name: neuvector-crd
apiVersion: v1
appVersion: 5.0.0
description: Helm chart for NeuVector's CRD services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector-crd
type: application
version: 100.0.0+up2.2.0

@@ -0,0 +1,19 @@
# NeuVector Helm Chart
Helm chart for NeuVector container security's CRD services. NeuVector's CRDs (Custom Resource Definitions) capture and declare application security policies early in the pipeline, so the defined policies can be deployed together with the container applications.
Because the CRD policies can be deployed before NeuVector's core product, this separate Helm chart was created. For backward compatibility, crd.yaml is not removed from the 'core' chart. If you use this 'crd' chart, please set 'crdwebhook.enabled' to false in the 'core' chart.
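For example, when this 'crd' chart manages the CRD resources, a minimal values override for the 'core' chart could look like the sketch below (the `crdwebhook` keys are the ones documented in the core chart; adapt to your environment):
```yaml
# values override for the NeuVector 'core' chart when the separate 'crd' chart manages CRDs
crdwebhook:
  enabled: false   # CRD resources are created by the 'crd' chart instead
  type: ClusterIP  # crd webhook service type (chart default)
```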
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`openshift` | If deploying in OpenShift, set this to true | `false` |
`serviceAccount` | Service account name for NeuVector components | `default` |
`crdwebhook.type` | crd webhook type | `ClusterIP` |
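As a reference, the parameters above could be collected in a small values file such as this sketch (the values mirror the chart's shipped values.yaml):
```yaml
# example values for the neuvector-crd chart
openshift: false          # set to true when deploying on OpenShift
serviceAccount: neuvector # service account name used by NeuVector components
crdwebhook:
  type: ClusterIP         # crd webhook service type
```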
---
Contact <support@neuvector.com> for access to Docker Hub and docs.

@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

File diff suppressed because it is too large

@@ -0,0 +1,11 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.
openshift: false
serviceAccount: neuvector
crdwebhook:
type: ClusterIP
enabled: true

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@@ -0,0 +1,28 @@
annotations:
catalog.cattle.io/auto-install: neuvector-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: NeuVector
catalog.cattle.io/kube-version: '>=1.18.0-0'
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/os: linux
catalog.cattle.io/permit-os: linux
catalog.cattle.io/provides-gvr: neuvector.com/v1
catalog.cattle.io/rancher-version: '>= 2.6.5-0 <= 2.6.100-0'
catalog.cattle.io/release-name: neuvector
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: neuvector
catalog.cattle.io/upstream-version: 2.2.0
apiVersion: v1
appVersion: 5.0.0
description: Helm feature chart for NeuVector's core services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
keywords:
- security
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector
sources:
- https://github.com/neuvector/neuvector
version: 100.0.0+up2.2.0

@@ -0,0 +1,198 @@
# NeuVector Helm Chart
Helm chart for NeuVector container security's core services.
## Preparation if using Helm 2
- Kubernetes 1.7+
- Helm installed and Tiller pod is running
- Cluster role `cluster-admin` available, check by:
```console
$ kubectl get clusterrole cluster-admin
```
If nothing is returned, add the `cluster-admin` ClusterRole:
cluster-admin.yaml
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cluster-admin
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'
```
```console
$ kubectl create -f cluster-admin.yaml
```
- If you have not created a service account for Tiller, create one and grant it admin privileges on the cluster:
```console
$ kubectl create serviceaccount --namespace kube-system tiller
$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
$ kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n kube-system
```
## CRD
Because the CRD (Custom Resource Definition) policies can be deployed before NeuVector's core product, a new 'crd' Helm chart was created. The crd template in the 'core' chart is kept for backward compatibility. Please set 'crdwebhook.enabled' to false if you use the new 'crd' chart.
## Choosing container runtime
The NeuVector platform supports Docker, cri-o, and containerd as the container runtime. k3s and Bottlerocket clusters have their own runtime socket paths, so enable the corresponding runtime option, k3s.enabled or bottlerocket.enabled, respectively.
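For example, on a k3s cluster the runtime could be selected with a values override like this sketch (only one runtime should be enabled; the socket path is the default documented in the table below):
```yaml
# container runtime selection for a k3s cluster -- enable only one runtime
docker:
  enabled: false
k3s:
  enabled: true
  runtimePath: /run/k3s/containerd/containerd.sock
```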
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`openshift` | If deploying in OpenShift, set this to true | `false` |
`registry` | NeuVector container registry | `registry.neuvector.com` |
`tag` | image tag for controller, enforcer and manager | `latest` |
`oem` | OEM release name | `nil` |
`imagePullSecrets` | image pull secret | `nil` |
`psp` | NeuVector Pod Security Policy when psp policy is enabled | `false` |
`serviceAccount` | Service account name for NeuVector components | `default` |
`controller.enabled` | If true, create controller | `true` |
`controller.image.repository` | controller image repository | `neuvector/controller` |
`controller.image.hash` | controller image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`controller.replicas` | controller replicas | `3` |
`controller.schedulerName` | kubernetes scheduler name | `nil` |
`controller.affinity` | controller affinity rules | ... | spread controllers to different nodes |
`controller.tolerations` | List of node taints to tolerate | `nil` |
`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`controller.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`controller.disruptionbudget` | controller PodDisruptionBudget. 0 to disable. Recommended value: 2. | `0` |
`controller.priorityClassName` | controller priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`controller.env` | User-defined environment variables for controller. | `[]` |
`controller.ranchersso.enabled` | If true, enable Rancher single sign on | `false` | Rancher server address auto configured.|
`controller.pvc.enabled` | If true, enable persistence for controller using PVC | `false` | Require persistent volume type RWX, and storage 1Gi
`controller.pvc.storageClass` | Storage Class to be used | `default` |
`controller.pvc.capacity` | Storage capacity | `1Gi` |
`controller.azureFileShare.enabled` | If true, enable the usage of an existing or statically provisioned Azure File Share | `false` |
`controller.azureFileShare.secretName` | The name of the secret containing the Azure file share storage account name and key | `nil` |
`controller.azureFileShare.shareName` | The name of the Azure file share to use | `nil` |
`controller.apisvc.type` | Controller REST API service type | `nil` |
`controller.apisvc.annotations` | Add annotations to controller REST API service | `{}` |
`controller.apisvc.route.enabled` | If true, create an OpenShift route to expose the Controller REST API service | `false` |
`controller.apisvc.route.termination` | Specify TLS termination for OpenShift route for Controller REST API service. Possible values: passthrough, edge, reencrypt | `passthrough` |
`controller.apisvc.route.host` | Set controller REST API service hostname | `nil` |
`controller.certificate.secret` | Replace controller REST API certificate using secret if secret name is specified | `nil` |
`controller.certificate.keyFile` | Replace controller REST API certificate key file | `tls.key` |
`controller.certificate.pemFile` | Replace controller REST API certificate pem file | `tls.pem` |
`controller.federation.mastersvc.type` | Multi-cluster primary cluster service type. If specified, the deployment will be used to manage other clusters. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` |
`controller.federation.mastersvc.route.enabled` | If true, create an OpenShift route to expose the Multi-cluster primary cluster service | `false` |
`controller.federation.mastersvc.route.host` | Set OpenShift route host for primary cluster service | `nil` |
`controller.federation.mastersvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster primary cluster service. Possible values: passthrough, edge, reencrypt | `passthrough` |
`controller.federation.mastersvc.ingress.enabled` | If true, create ingress for federation master service, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.federation.mastersvc.ingress.tls` | If true, TLS is enabled for controller federation master ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.mastersvc.ingress.host`.
`controller.federation.mastersvc.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.federation.mastersvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.mastersvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`controller.federation.managedsvc.type` | Multi-cluster managed cluster service type. If specified, the deployment will be managed by the primary cluster. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` |
`controller.federation.managedsvc.route.enabled` | If true, create an OpenShift route to expose the Multi-cluster managed cluster service | `false` |
`controller.federation.managedsvc.route.host` | Set OpenShift route host for the managed cluster service | `nil` |
`controller.federation.managedsvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster managed cluster service. Possible values: passthrough, edge, reencrypt | `passthrough` |
`controller.federation.managedsvc.ingress.enabled` | If true, create ingress for federation managed service, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.federation.managedsvc.ingress.tls` | If true, TLS is enabled for controller federation managed ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.managedsvc.ingress.host`.
`controller.federation.managedsvc.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.federation.managedsvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.managedsvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`controller.ingress.enabled` | If true, create ingress for rest api, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.ingress.tls` | If true, TLS is enabled for controller rest api ingress service |`false` | If set, the tls-host used is the one set with `controller.ingress.host`.
`controller.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`controller.configmap.enabled` | If true, configure NeuVector global settings using a ConfigMap | `false`
`controller.configmap.data` | NeuVector configuration in YAML format | `{}`
`controller.secret.enabled` | If true, configure NeuVector global settings using secrets | `false`
`controller.secret.data` | NeuVector configuration in key/value pair format | `{}`
`enforcer.enabled` | If true, create enforcer | `true` |
`enforcer.image.repository` | enforcer image repository | `neuvector/enforcer` |
`enforcer.image.hash` | enforcer image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`enforcer.priorityClassName` | enforcer priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`enforcer.tolerations` | List of node taints to tolerate | `- effect: NoSchedule`<br>`key: node-role.kubernetes.io/master` | other taints can be added after the default
`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`manager.enabled` | If true, create manager | `true` |
`manager.image.repository` | manager image repository | `neuvector/manager` |
`manager.image.hash` | manager image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`manager.priorityClassName` | manager priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`manager.env.ssl` | If false, manager will listen on HTTP access instead of HTTPS | `true` |
`manager.svc.type` | set manager service type for native Kubernetes | `NodePort`;<br>if it is OpenShift platform or ingress is enabled, then default is `ClusterIP` | set to LoadBalancer if using cloud providers, such as Azure, Amazon, Google
`manager.svc.loadBalancerIP` | if manager service type is LoadBalancer, this is used to specify the load balancer's IP | `nil` |
`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`manager.route.enabled` | If true, create an OpenShift route to expose the management console service | `true` |
`manager.route.host` | Set OpenShift route host for management console service | `nil` |
`manager.route.termination` | Specify TLS termination for OpenShift route for management console service. Possible values: passthrough, edge, reencrypt | `passthrough` |
`manager.certificate.secret` | Replace manager UI certificate using secret if secret name is specified | `nil` |
`manager.certificate.keyFile` | Replace manager UI certificate key file | `tls.key` |
`manager.certificate.pemFile` | Replace manager UI certificate pem file | `tls.pem` |
`manager.ingress.enabled` | If true, create ingress, must also set ingress host value | `false` | enable this if ingress controller is installed
`manager.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`manager.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. Currently only supports `/`
`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`manager.ingress.tls` | If true, TLS is enabled for manager ingress service |`false` | If set, the tls-host used is the one set with `manager.ingress.host`.
`manager.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml)
`manager.affinity` | manager affinity rules | `{}` |
`manager.tolerations` | List of node taints to tolerate | `nil` |
`manager.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`cve.updater.enabled` | If true, create cve updater | `true` |
`cve.updater.secure` | If true, the API server's certificate is validated | `false` |
`cve.updater.image.repository` | cve updater image repository | `neuvector/updater` |
`cve.updater.image.tag` | image tag for cve updater | `latest` |
`cve.updater.image.hash` | cve updater image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`cve.updater.priorityClassName` | cve updater priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`cve.updater.schedule` | cronjob cve updater schedule | `0 0 * * *` |
`cve.scanner.enabled` | If true, cve scanners will be deployed | `true` |
`cve.scanner.image.repository` | cve scanner image repository | `neuvector/scanner` |
`cve.scanner.image.tag` | cve scanner image tag | `latest` |
`cve.scanner.image.hash` | cve scanner image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`cve.scanner.priorityClassName` | cve scanner priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`cve.scanner.replicas` | external scanner replicas | `3` |
`cve.scanner.dockerPath` | the remote docker socket if the CI/CD integration needs to scan images before they are pushed to the registry | `nil` |
`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/5.0.0/charts/core/values.yaml) |
`cve.scanner.affinity` | scanner affinity rules | `{}` |
`cve.scanner.tolerations` | List of node taints to tolerate | `nil` |
`cve.scanner.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`docker.path` | docker path | `/var/run/docker.sock` |
`containerd.enabled` | Set to true, if the container runtime is containerd | `false` | **Note**: For k3s cluster, set k3s.enabled to true instead
`containerd.path` | If containerd is enabled, this local containerd socket path will be used | `/var/run/containerd/containerd.sock` |
`crio.enabled` | Set to true, if the container runtime is cri-o | `false` |
`crio.path` | If cri-o is enabled, this local cri-o socket path will be used | `/var/run/crio/crio.sock` |
`k3s.enabled` | Set to true for k3s | `false` |
`k3s.runtimePath` | If k3s is enabled, this local containerd socket path will be used | `/run/k3s/containerd/containerd.sock` |
`bottlerocket.enabled` | Set to true if using AWS bottlerocket | `false` |
`bottlerocket.runtimePath` | If bottlerocket is enabled, this local containerd socket path will be used | `/run/dockershim.sock` |
`admissionwebhook.type` | admission webhook type | `ClusterIP` |
`crdwebhook.enabled` | Enable crd service and create crd related resources | `true` |
`crdwebhook.type` | crd webhook type | `ClusterIP` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release --namespace neuvector ./neuvector-helm/ --set manager.env.ssl=off
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install my-release --namespace neuvector ./neuvector-helm/ -f values.yaml
```
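For instance, a small values.yaml assembled from the parameters above might look like this sketch (registry, tags, and service types are illustrative and should be adapted to your environment):
```yaml
# example values.yaml for the NeuVector core chart
registry: registry.neuvector.com
tag: latest
controller:
  replicas: 3
manager:
  svc:
    type: NodePort
  ingress:
    enabled: false
cve:
  scanner:
    replicas: 3
```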
---

@@ -0,0 +1,16 @@
### Run-Time Protection Without Compromise
NeuVector delivers a complete run-time security solution with container process/file system protection and vulnerability scanning combined with the only true Layer 7 container firewall. Protect sensitive data with a complete container security platform.
NeuVector integrates tightly with Rancher and Kubernetes to extend the built-in security features for applications that require defense in depth. Security features include:
+ Build phase vulnerability scanning with Jenkins plug-in and registry scanning
+ Admission control to prevent vulnerable or unauthorized image deployments using Kubernetes admission control webhooks
+ Complete run-time scanning with network, process, and file system monitoring and protection
+ The industry's only layer 7 container firewall for multi-protocol threat detection and automated segmentation
+ Advanced network controls including DLP detection, service mesh integration, connection blocking and packet captures
+ Run-time vulnerability scanning and CIS benchmarks
Additional Notes:
+ Previous deployments from Rancher, such as from our Partners chart repository or the primary NeuVector Helm chart, must be completely removed in order to update to the new integrated feature chart. See https://github.com/rancher/rancher/issues/37447.
+ Configure the correct container runtime and runtime path under Container Runtime, and enable only one runtime; see the example below.
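For example, a cluster that uses containerd could enable it with a values override like this sketch (the socket path is the chart's documented default; enable only one runtime):
```yaml
# select containerd as the container runtime -- enable only one runtime
docker:
  enabled: false
containerd:
  enabled: true
  path: /var/run/containerd/containerd.sock
```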

@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

@@ -0,0 +1,224 @@
questions:
#image configurations
- variable: registry
default: "docker.io"
description: image registry
type: string
label: Image Registry
group: "Container Images"
- variable: controller.image.repository
default: "neuvector/controller"
description: controller image repository
type: string
label: Controller image path
group: "Container Images"
- variable: controller.image.tag
default: ""
description: image tag for controller
type: string
label: Controller Image Tag
group: "Container Images"
- variable: manager.image.repository
default: "neuvector/manager"
description: manager image repository
type: string
label: Manager image path
group: "Container Images"
- variable: manager.image.tag
default: ""
description: image tag for manager
type: string
label: Manager Image Tag
group: "Container Images"
- variable: enforcer.image.repository
default: "neuvector/enforcer"
description: enforcer image repository
type: string
label: Enforcer image path
group: "Container Images"
- variable: enforcer.image.tag
default: ""
description: image tag for enforcer
type: string
label: Enforcer Image Tag
group: "Container Images"
- variable: cve.scanner.image.repository
default: "neuvector/scanner"
description: scanner image repository
type: string
label: Scanner image path
group: "Container Images"
- variable: cve.scanner.image.tag
default: ""
description: image tag for scanner
type: string
label: Scanner Image Tag
group: "Container Images"
- variable: cve.updater.image.repository
default: "neuvector/updater"
description: cve updater image repository
type: string
label: CVE Updater image path
group: "Container Images"
- variable: cve.updater.image.tag
default: ""
description: image tag for updater
type: string
label: Updater Image Tag
group: "Container Images"
#Container Runtime configurations
- variable: docker.enabled
default: true
description: Docker runtime. Enable only one runtime.
type: boolean
label: Docker Runtime
show_subquestion_if: true
group: "Container Runtime"
subquestions:
- variable: docker.path
default: "/var/run/docker.sock"
description: "Docker Runtime Path"
type: string
label: Runtime Path
- variable: containerd.enabled
default: "false"
description: Containerd runtime. Enable only one runtime.
type: boolean
label: Containerd Runtime
show_subquestion_if: true
group: "Container Runtime"
subquestions:
- variable: containerd.path
default: " /var/run/containerd/containerd.sock"
description: "Containerd Runtime Path"
type: string
label: Runtime Path
- variable: crio.enabled
default: "false"
description: CRI-O runtime. Enable only one runtime.
type: boolean
label: CRI-O Runtime
show_subquestion_if: true
group: "Container Runtime"
subquestions:
- variable: crio.path
default: "/var/run/crio/crio.sock"
description: "CRI-O Runtime Path"
type: string
label: Runtime Path
- variable: k3s.enabled
default: "false"
description: k3s containerd runtime. Enable only one runtime.
type: boolean
label: k3s Containerd Runtime
show_subquestion_if: true
group: "Container Runtime"
subquestions:
- variable: k3s.runtimePath
default: " /run/k3s/containerd/containerd.sock"
description: "k3s Containerd Runtime Path"
type: string
label: Runtime Path
#storage configurations
- variable: controller.pvc.enabled
default: false
description: If true, enable persistence for controller using PVC
type: boolean
label: PVC status
group: "PVC Configuration"
- variable: controller.pvc.storageClass
default: ""
description: Storage Class to be used
type: string
label: Storage Class Name
group: "PVC Configuration"
#ingress configurations
- variable: manager.ingress.enabled
default: false
description: If true, create ingress, must also set ingress host value
type: boolean
label: Manager ingress status
group: "Ingress Configuration"
- variable: manager.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Manager Ingress host
group: "Ingress Configuration"
- variable: manager.ingress.path
default: "/"
description: Set ingress path
type: string
label: Manager Ingress path
group: "Ingress Configuration"
- variable: manager.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation.
type: string
label: Manager Ingress annotations
group: "Ingress Configuration"
- variable: controller.ingress.enabled
default: false
description: If true, create ingress for rest api, must also set ingress host value
type: boolean
label: Controller ingress status
group: "Ingress Configuration"
- variable: controller.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Controller Ingress host
group: "Ingress Configuration"
- variable: controller.ingress.path
default: "/"
description: Set ingress path
type: string
label: Controller Ingress path
group: "Ingress Configuration"
- variable: controller.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation.
type: string
label: Controller Ingress annotations
group: "Ingress Configuration"
#service configurations
- variable: manager.svc.type
default: "NodePort"
description: Set manager service type for native Kubernetes
type: enum
label: Manager service type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"
- variable: controller.federation.mastersvc.type
default: ""
description: Multi-cluster master cluster service type. If specified, the deployment will be used to manage other clusters. Possible values include NodePort, LoadBalancer and Ingress
type: enum
label: Fed Master Service Type
group: "Service Configuration"
options:
- "NodePort"
- "Ingress"
- "LoadBalancer"
- variable: controller.federation.managedsvc.type
default: ""
description: Multi-cluster managed cluster service type. If specified, the deployment will be managed by the master cluster. Possible values include NodePort, LoadBalancer and Ingress
type: enum
label: Fed Managed service type
group: "Service Configuration"
options:
- "NodePort"
- "Ingress"
- "LoadBalancer"
- variable: controller.apisvc.type
default: "NodePort"
description: Controller REST API service type
type: enum
label: Controller REST API Service Type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"

@@ -0,0 +1,20 @@
{{- if and .Values.manager.enabled .Values.manager.ingress.enabled }}
From outside the cluster, the NeuVector URL is:
http://{{ .Values.manager.ingress.host }}
{{- else if not .Values.openshift }}
Get the NeuVector URL by running these commands:
{{- if contains "NodePort" .Values.manager.svc.type }}
NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services neuvector-service-webui)
NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo https://$NODE_IP:$NODE_PORT
{{- else if contains "ClusterIP" .Values.manager.svc.type }}
CLUSTER_IP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.clusterIP}" services neuvector-service-webui)
echo https://$CLUSTER_IP:8443
{{- else if contains "LoadBalancer" .Values.manager.svc.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w neuvector-service-webui'
SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} neuvector-service-webui -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
echo https://$SERVICE_IP:8443
{{- end }}
{{- end }}

@@ -0,0 +1,40 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-admission-webhook
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ports:
- port: 443
targetPort: 20443
protocol: TCP
name: admission-webhook
type: {{ .Values.admissionwebhook.type }}
selector:
app: neuvector-controller-pod

@@ -0,0 +1,119 @@
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-app
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- ""
resources:
- nodes
- pods
- services
- namespaces
verbs:
- get
- list
- watch
- update
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-rbac
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
{{- if .Values.openshift }}
- apiGroups:
- image.openshift.io
resources:
- imagestreams
verbs:
- get
- list
- watch
{{- end }}
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
- clusterrolebindings
- clusterroles
verbs:
- get
- list
- watch
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-admission
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- delete
---
{{- if $oc4 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: neuvector-binding-co
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- config.openshift.io
resources:
- clusteroperators
verbs:
- get
- list
{{- end }}

View File

@ -0,0 +1,145 @@
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-app
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-app
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-rbac
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-rbac
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-admission
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-admission
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-view
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: view
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc4 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-co
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: neuvector-binding-co
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,193 @@
{{- if .Values.controller.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
name: neuvector-controller-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.controller.replicas }}
minReadySeconds: 60
strategy:
{{ toYaml .Values.controller.strategy | indent 4 }}
selector:
matchLabels:
app: neuvector-controller-pod
template:
metadata:
labels:
app: neuvector-controller-pod
release: {{ .Release.Name }}
spec:
{{- if .Values.controller.affinity }}
affinity:
{{ toYaml .Values.controller.affinity | indent 8 }}
{{- end }}
{{- if .Values.controller.tolerations }}
tolerations:
{{ toYaml .Values.controller.tolerations | indent 8 }}
{{- end }}
{{- if .Values.controller.nodeSelector }}
nodeSelector:
{{ toYaml .Values.controller.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.controller.schedulerName }}
schedulerName: {{ .Values.controller.schedulerName }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-controller-pod
image: {{ template "system_default_registry" . }}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}
securityContext:
privileged: true
resources:
{{- if .Values.controller.resources }}
{{ toYaml .Values.controller.resources | indent 12 }}
{{- else }}
{{ toYaml .Values.resources | indent 12 }}
{{- end }}
readinessProbe:
exec:
command:
- cat
- /tmp/ready
initialDelaySeconds: 5
periodSeconds: 5
env:
- name: CLUSTER_JOIN_ADDR
value: neuvector-svc-controller.{{ .Release.Namespace }}
- name: CLUSTER_ADVERTISED_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CLUSTER_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- if .Values.controller.ranchersso.enabled }}
- name: RANCHER_SSO
value: "1"
- name: RANCHER_EP
value: "{{ .Values.global.cattle.url }}"
{{- end }}
{{- if or .Values.controller.pvc.enabled .Values.controller.azureFileShare.enabled }}
- name: CTRL_PERSIST_CONFIG
value: "1"
{{- end }}
{{- with .Values.controller.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /var/neuvector
name: nv-share
readOnly: false
{{- if .Values.containerd.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.k3s.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.bottlerocket.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.crio.enabled }}
- mountPath: /var/run/crio/crio.sock
{{- else }}
- mountPath: /var/run/docker.sock
{{- end }}
name: runtime-sock
readOnly: true
- mountPath: /host/proc
name: proc-vol
readOnly: true
- mountPath: /host/cgroup
name: cgroup-vol
readOnly: true
- mountPath: /etc/config
name: config-volume
readOnly: true
{{- if .Values.controller.certificate.secret }}
- mountPath: /etc/neuvector/certs/ssl-cert.key
subPath: {{ .Values.controller.certificate.keyFile }}
name: cert
readOnly: true
- mountPath: /etc/neuvector/certs/ssl-cert.pem
subPath: {{ .Values.controller.certificate.pemFile }}
name: cert
readOnly: true
{{- end }}
terminationGracePeriodSeconds: 300
restartPolicy: Always
volumes:
- name: nv-share
{{- if .Values.controller.pvc.enabled }}
persistentVolumeClaim:
claimName: neuvector-data
{{- else if .Values.controller.azureFileShare.enabled }}
azureFile:
secretName: {{ .Values.controller.azureFileShare.secretName }}
shareName: {{ .Values.controller.azureFileShare.shareName }}
readOnly: false
{{- else }}
hostPath:
path: /var/neuvector
{{- end }}
- name: runtime-sock
hostPath:
{{- if .Values.containerd.enabled }}
path: {{ .Values.containerd.path }}
{{- else if .Values.crio.enabled }}
path: {{ .Values.crio.path }}
{{- else if .Values.k3s.enabled }}
path: {{ .Values.k3s.runtimePath }}
{{- else if .Values.bottlerocket.enabled }}
path: {{ .Values.bottlerocket.runtimePath }}
{{- else }}
path: {{ .Values.docker.path }}
{{- end }}
- name: proc-vol
hostPath:
path: /proc
- name: cgroup-vol
hostPath:
path: /sys/fs/cgroup
- name: config-volume
projected:
sources:
- configMap:
name: neuvector-init
optional: true
- secret:
name: neuvector-init
optional: true
{{- if .Values.controller.certificate.secret }}
- name: cert
secret:
secretName: {{ .Values.controller.certificate.secret }}
{{- end }}
{{- if gt (int .Values.controller.disruptionbudget) 0 }}
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: neuvector-controller-pdb
namespace: {{ .Release.Namespace }}
spec:
minAvailable: {{ .Values.controller.disruptionbudget }}
selector:
matchLabels:
app: neuvector-controller-pod
{{- end }}
{{- end }}

View File

@ -0,0 +1,210 @@
{{- if .Values.controller.enabled }}
{{- if .Values.controller.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-restapi-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.ingress.host }}
{{- if .Values.controller.ingress.secretName }}
secretName: {{ .Values.controller.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.ingress.host }}
http:
paths:
- path: {{ .Values.controller.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-api
port:
number: 10443
{{- else }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-restapi-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.ingress.host }}
{{- if .Values.controller.ingress.secretName }}
secretName: {{ .Values.controller.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.ingress.host }}
http:
paths:
- path: {{ .Values.controller.ingress.path }}
backend:
serviceName: neuvector-svc-controller-api
servicePort: 10443
{{- end }}
{{- end }}
{{- if .Values.controller.federation.mastersvc.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-mastersvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.mastersvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.mastersvc.ingress.host }}
{{- if .Values.controller.federation.mastersvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.mastersvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.mastersvc.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-fed-master
port:
number: 11443
{{- else }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-mastersvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.mastersvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.mastersvc.ingress.host }}
{{- if .Values.controller.federation.mastersvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.mastersvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.mastersvc.ingress.path }}
backend:
serviceName: neuvector-svc-controller-fed-master
servicePort: 11443
{{- end }}
{{- end }}
{{- if .Values.controller.federation.managedsvc.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-managedsvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.managedsvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.managedsvc.ingress.host }}
{{- if .Values.controller.federation.managedsvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.managedsvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.managedsvc.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-fed-managed
port:
number: 10443
{{- else }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-managedsvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.managedsvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.managedsvc.ingress.host }}
{{- if .Values.controller.federation.managedsvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.managedsvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.managedsvc.ingress.path }}
backend:
serviceName: neuvector-svc-controller-fed-managed
servicePort: 10443
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,82 @@
{{- if .Values.openshift -}}
{{- if .Values.controller.apisvc.route.enabled }}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
name: neuvector-route-api
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.apisvc.route.host }}
host: {{ .Values.controller.apisvc.route.host }}
{{- end }}
to:
kind: Service
name: neuvector-svc-controller-api
port:
targetPort: controller-api
tls:
termination: {{ .Values.controller.apisvc.route.termination }}
---
{{ end -}}
{{- if .Values.controller.federation.mastersvc.route.enabled }}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
name: neuvector-route-fed-master
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.route.host }}
host: {{ .Values.controller.federation.mastersvc.route.host }}
{{- end }}
to:
kind: Service
name: neuvector-svc-controller-fed-master
port:
targetPort: fed
tls:
termination: {{ .Values.controller.federation.mastersvc.route.termination }}
---
{{ end -}}
{{- if .Values.controller.federation.managedsvc.route.enabled }}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
name: neuvector-route-fed-managed
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.route.host }}
host: {{ .Values.controller.federation.managedsvc.route.host }}
{{- end }}
to:
kind: Service
name: neuvector-svc-controller-fed-managed
port:
targetPort: fed
tls:
termination: {{ .Values.controller.federation.managedsvc.route.termination }}
{{ end -}}
{{- end -}}

View File

@ -0,0 +1,89 @@
{{- if .Values.controller.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-controller
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
clusterIP: None
ports:
- port: 18300
protocol: "TCP"
name: "cluster-tcp-18300"
- port: 18301
protocol: "TCP"
name: "cluster-tcp-18301"
- port: 18301
protocol: "UDP"
name: "cluster-udp-18301"
selector:
app: neuvector-controller-pod
{{- if .Values.controller.apisvc.type }}
---
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-controller-api
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.apisvc.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.controller.apisvc.type }}
ports:
- port: 10443
protocol: "TCP"
name: "controller-api"
selector:
app: neuvector-controller-pod
{{ end -}}
{{- if .Values.controller.federation.mastersvc.type }}
---
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-controller-fed-master
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.controller.federation.mastersvc.type }}
ports:
- port: 11443
name: fed
protocol: TCP
selector:
app: neuvector-controller-pod
{{ end -}}
{{- if .Values.controller.federation.managedsvc.type }}
---
apiVersion: v1
kind: Service
metadata:
name: neuvector-svc-controller-fed-managed
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.controller.federation.managedsvc.type }}
ports:
- port: 10443
name: fed
protocol: TCP
selector:
app: neuvector-controller-pod
{{ end -}}
{{- end -}}

View File

@ -0,0 +1,111 @@
{{- if .Values.enforcer.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: DaemonSet
metadata:
name: neuvector-enforcer-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: neuvector-enforcer-pod
template:
metadata:
labels:
app: neuvector-enforcer-pod
release: {{ .Release.Name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.enforcer.tolerations }}
tolerations:
{{ toYaml .Values.enforcer.tolerations | indent 8 }}
{{- end }}
hostPID: true
{{- if .Values.enforcer.priorityClassName }}
priorityClassName: {{ .Values.enforcer.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-enforcer-pod
image: {{ template "system_default_registry" . }}{{ .Values.enforcer.image.repository }}:{{ .Values.enforcer.image.tag }}
securityContext:
privileged: true
resources:
{{- if .Values.enforcer.resources }}
{{ toYaml .Values.enforcer.resources | indent 12 }}
{{- else }}
{{ toYaml .Values.resources | indent 12 }}
{{- end }}
env:
- name: CLUSTER_JOIN_ADDR
value: neuvector-svc-controller.{{ .Release.Namespace }}
- name: CLUSTER_ADVERTISED_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CLUSTER_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
{{- if .Values.containerd.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.k3s.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.bottlerocket.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.crio.enabled }}
- mountPath: /var/run/crio/crio.sock
{{- else }}
- mountPath: /var/run/docker.sock
{{- end }}
name: runtime-sock
readOnly: true
- mountPath: /host/proc
name: proc-vol
readOnly: true
- mountPath: /host/cgroup
name: cgroup-vol
readOnly: true
- mountPath: /lib/modules
name: modules-vol
readOnly: true
terminationGracePeriodSeconds: 1200
restartPolicy: Always
volumes:
- name: runtime-sock
hostPath:
{{- if .Values.containerd.enabled }}
path: {{ .Values.containerd.path }}
{{- else if .Values.crio.enabled }}
path: {{ .Values.crio.path }}
{{- else if .Values.k3s.enabled }}
path: {{ .Values.k3s.runtimePath }}
{{- else if .Values.bottlerocket.enabled }}
path: {{ .Values.bottlerocket.runtimePath }}
{{- else }}
path: {{ .Values.docker.path }}
{{- end }}
- name: proc-vol
hostPath:
path: /proc
- name: cgroup-vol
hostPath:
path: /sys/fs/cgroup
- name: modules-vol
hostPath:
path: /lib/modules
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if .Values.controller.configmap.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: neuvector-init
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{ toYaml .Values.controller.configmap.data | indent 4 }}
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if .Values.controller.secret.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: neuvector-init
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{- range $key, $val := .Values.controller.secret.data }}
{{ $key }}: | {{ toYaml $val | b64enc | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,81 @@
{{- if .Values.manager.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
name: neuvector-manager-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: 1
selector:
matchLabels:
app: neuvector-manager-pod
template:
metadata:
labels:
app: neuvector-manager-pod
release: {{ .Release.Name }}
spec:
{{- if .Values.manager.affinity }}
affinity:
{{ toYaml .Values.manager.affinity | indent 8 }}
{{- end }}
{{- if .Values.manager.tolerations }}
tolerations:
{{ toYaml .Values.manager.tolerations | indent 8 }}
{{- end }}
{{- if .Values.manager.nodeSelector }}
nodeSelector:
{{ toYaml .Values.manager.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.manager.priorityClassName }}
priorityClassName: {{ .Values.manager.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-manager-pod
image: {{ template "system_default_registry" . }}{{ .Values.manager.image.repository }}:{{ .Values.manager.image.tag }}
env:
- name: CTRL_SERVER_IP
value: neuvector-svc-controller.{{ .Release.Namespace }}
{{- if not .Values.manager.env.ssl }}
- name: MANAGER_SSL
value: "off"
{{- end }}
volumeMounts:
{{- if .Values.manager.certificate.secret }}
- mountPath: /etc/neuvector/certs/ssl-cert.key
subPath: {{ .Values.manager.certificate.keyFile }}
name: cert
readOnly: true
- mountPath: /etc/neuvector/certs/ssl-cert.pem
subPath: {{ .Values.manager.certificate.pemFile }}
name: cert
readOnly: true
{{- end }}
resources:
{{- if .Values.manager.resources }}
{{ toYaml .Values.manager.resources | indent 12 }}
{{- else }}
{{ toYaml .Values.resources | indent 12 }}
{{- end }}
restartPolicy: Always
volumes:
{{- if .Values.manager.certificate.secret }}
- name: cert
secret:
secretName: {{ .Values.manager.certificate.secret }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,68 @@
{{- if and .Values.manager.enabled .Values.manager.ingress.enabled -}}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-webui-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.manager.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.ingress.tls }}
tls:
- hosts:
- {{ .Values.manager.ingress.host }}
{{- if .Values.manager.ingress.secretName }}
secretName: {{ .Values.manager.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.manager.ingress.host }}
http:
paths:
- path: {{ .Values.manager.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-service-webui
port:
number: 8443
{{- else }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-webui-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.manager.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.ingress.tls }}
tls:
- hosts:
- {{ .Values.manager.ingress.host }}
{{- if .Values.manager.ingress.secretName }}
secretName: {{ .Values.manager.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.manager.ingress.host }}
http:
paths:
- path: {{ .Values.manager.ingress.path }}
backend:
serviceName: neuvector-service-webui
servicePort: 8443
{{- end }}
{{- end -}}

View File

@ -0,0 +1,28 @@
{{- if .Values.openshift -}}
{{- if .Values.manager.route.enabled }}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
name: neuvector-route-webui
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.route.host }}
host: {{ .Values.manager.route.host }}
{{- end }}
to:
kind: Service
name: neuvector-service-webui
port:
targetPort: manager
tls:
termination: {{ .Values.manager.route.termination }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,26 @@
{{- if .Values.manager.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: neuvector-service-webui
namespace: {{ .Release.Namespace }}
{{- with .Values.manager.svc.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.manager.svc.type }}
{{- if and .Values.manager.svc.loadBalancerIP (eq .Values.manager.svc.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.manager.svc.loadBalancerIP }}
{{- end }}
ports:
- port: 8443
name: manager
protocol: TCP
selector:
app: neuvector-manager-pod
{{- end }}

View File

@ -0,0 +1,77 @@
{{- if .Values.psp -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: neuvector-binding-psp
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
labels:
chart: {{ template "neuvector.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
privileged: true
readOnlyRootFilesystem: false
allowPrivilegeEscalation: true
allowedCapabilities:
- SYS_ADMIN
- NET_ADMIN
- SYS_PTRACE
- IPC_LOCK
requiredDropCapabilities:
- ALL
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: neuvector-binding-psp
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
rules:
- apiGroups:
- policy
- extensions
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- neuvector-binding-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: neuvector-binding-psp
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: neuvector-binding-psp
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if and .Values.controller.enabled .Values.controller.pvc.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: neuvector-data
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
accessModes:
{{ toYaml .Values.controller.pvc.accessModes | indent 4 }}
volumeMode: Filesystem
{{- if .Values.controller.pvc.storageClass }}
storageClassName: {{ .Values.controller.pvc.storageClass }}
{{- end }}
resources:
requests:
{{- if .Values.controller.pvc.capacity }}
storage: {{ .Values.controller.pvc.capacity }}
{{- else }}
storage: 1Gi
{{- end }}
{{- end }}

View File

@ -0,0 +1,54 @@
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: RoleBinding
metadata:
name: neuvector-admin
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: admin
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc4 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: system:openshift:scc:privileged
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:openshift:scc:privileged
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,62 @@
{{- if .Values.cve.scanner.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
name: neuvector-scanner-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
strategy:
{{ toYaml .Values.cve.scanner.strategy | indent 4 }}
replicas: {{ .Values.cve.scanner.replicas }}
selector:
matchLabels:
app: neuvector-scanner-pod
template:
metadata:
labels:
app: neuvector-scanner-pod
spec:
{{- if .Values.cve.scanner.affinity }}
affinity:
{{ toYaml .Values.cve.scanner.affinity | indent 8 }}
{{- end }}
{{- if .Values.cve.scanner.tolerations }}
tolerations:
{{ toYaml .Values.cve.scanner.tolerations | indent 8 }}
{{- end }}
{{- if .Values.cve.scanner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.cve.scanner.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.cve.scanner.priorityClassName }}
priorityClassName: {{ .Values.cve.scanner.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-scanner-pod
image: {{ template "system_default_registry" . }}{{ .Values.cve.scanner.image.repository }}:{{ .Values.cve.scanner.image.tag }}
imagePullPolicy: Always
env:
- name: CLUSTER_JOIN_ADDR
value: neuvector-svc-controller.{{ .Release.Namespace }}
{{- if .Values.cve.scanner.dockerPath }}
- name: SCANNER_DOCKER_URL
value: {{ .Values.cve.scanner.dockerPath }}
{{- end }}
resources:
{{ toYaml .Values.cve.scanner.resources | indent 12 }}
restartPolicy: Always
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if not .Values.openshift}}
{{- if ne .Values.serviceAccount "default"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,65 @@
{{- if .Values.cve.updater.enabled -}}
{{- if (semverCompare ">=1.21-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: batch/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: batch/v1beta1
{{- else }}
apiVersion: batch/v2alpha1
{{- end }}
kind: CronJob
metadata:
name: neuvector-updater-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
schedule: {{ .Values.cve.updater.schedule | quote }}
jobTemplate:
spec:
template:
metadata:
labels:
app: neuvector-updater-pod
release: {{ .Release.Name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.cve.updater.priorityClassName }}
priorityClassName: {{ .Values.cve.updater.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-updater-pod
image: {{ template "system_default_registry" . }}{{ .Values.cve.updater.image.repository }}:{{ .Values.cve.updater.image.tag }}
imagePullPolicy: Always
command:
- /bin/sh
- -c
- sleep 30
{{- if .Values.cve.scanner.enabled }}
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
{{- if .Values.cve.updater.secure }}
- /usr/bin/curl -v -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
{{- else }}
- /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
{{- end }}
{{- else }}
- /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/extensions/v1beta1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
{{- end }}
{{- end }}
env:
- name: CLUSTER_JOIN_ADDR
value: neuvector-svc-controller.{{ .Release.Namespace }}
restartPolicy: Never
{{- end }}

View File

@ -0,0 +1,295 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.
openshift: false
registry: docker.io
oem:
psp: false
serviceAccount: neuvector
controller:
# If false, controller will not be installed
enabled: true
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
image:
repository: rancher/mirrored-neuvector-controller
tag: 5.0.0
hash:
replicas: 3
disruptionbudget: 0
schedulerName:
priorityClassName:
env: []
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- neuvector-controller-pod
topologyKey: "kubernetes.io/hostname"
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
apisvc:
type:
annotations: {}
# OpenShift Route configuration
route:
enabled: false
termination: passthrough
host:
ranchersso:
enabled: true
pvc:
enabled: false
accessModes:
- ReadWriteMany
storageClass:
capacity:
azureFileShare:
enabled: false
secretName:
shareName:
certificate:
secret:
keyFile: tls.key
pemFile: tls.pem
federation:
mastersvc:
type:
# Federation Master Ingress
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
ingress.kubernetes.io/protocol: https
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
# OpenShift Route configuration
route:
enabled: false
termination: passthrough
host:
managedsvc:
type:
# Federation Managed Ingress
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
ingress.kubernetes.io/protocol: https
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
# OpenShift Route configuration
route:
enabled: false
termination: passthrough
host:
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
ingress.kubernetes.io/protocol: https
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
configmap:
enabled: false
data:
# eulainitcfg.yaml: |
# ...
# ldapinitcfg.yaml: |
# ...
# oidcinitcfg.yaml: |
# ...
# samlinitcfg.yaml: |
# ...
# sysinitcfg.yaml: |
# ...
# userinitcfg.yaml: |
# ...
secret:
# NOTE: files defined here have preference over the ones defined in the configmap section
enabled: false
data: {}
# eulainitcfg.yaml:
# license_key: 0Bca63Iy2FiXGqjk...
# ...
# ldapinitcfg.yaml:
# directory: OpenLDAP
# ...
# oidcinitcfg.yaml:
# Issuer: https://...
# ...
# samlinitcfg.yaml:
# ...
# sysinitcfg.yaml:
# ...
# userinitcfg.yaml:
# ...
enforcer:
# If false, enforcer will not be installed
enabled: true
image:
repository: rancher/mirrored-neuvector-enforcer
tag: 5.0.0
hash:
priorityClassName:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
manager:
# If false, manager will not be installed
enabled: true
image:
repository: rancher/mirrored-neuvector-manager
tag: 5.0.0
hash:
priorityClassName:
env:
ssl: true
svc:
type: NodePort
loadBalancerIP:
annotations: {}
# azure
# service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "apps-subnet"
# OpenShift Route configuration
route:
enabled: true
termination: passthrough
host:
certificate:
secret:
keyFile: tls.key
pemFile: tls.pem
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
path: "/"
annotations: {}
# kubernetes.io/ingress.class: my-nginx
# nginx.ingress.kubernetes.io/whitelist-source-range: "1.1.1.1"
# nginx.ingress.kubernetes.io/rewrite-target: /
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# only for end-to-end tls conf - ingress-nginx accepts backend self-signed cert
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
tls: false
secretName: # my-tls-secret
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
affinity: {}
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
cve:
updater:
# If false, cve updater will not be installed
enabled: true
secure: false
image:
repository: rancher/mirrored-neuvector-updater
tag: latest
hash:
schedule: "0 0 * * *"
priorityClassName:
scanner:
enabled: true
replicas: 3
dockerPath: ""
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
image:
repository: rancher/mirrored-neuvector-scanner
tag: latest
hash:
priorityClassName:
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
affinity: {}
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
docker:
path: /var/run/docker.sock
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
k3s:
enabled: false
runtimePath: /run/k3s/containerd/containerd.sock
bottlerocket:
enabled: false
runtimePath: /run/dockershim.sock
containerd:
enabled: false
path: /var/run/containerd/containerd.sock
crio:
enabled: false
path: /var/run/crio/crio.sock
admissionwebhook:
type: ClusterIP
crdwebhook:
enabled: true
type: ClusterIP

View File

@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Prometheus Federator
catalog.cattle.io/kube-version: '>=1.16.0-0'
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/os: linux,windows
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.6.5-0 <= 2.6.100-0'
catalog.cattle.io/release-name: prometheus-federator
apiVersion: v2
appVersion: 0.1.0
dependencies:
- condition: helmProjectOperator.enabled
name: helmProjectOperator
repository: file://./charts/helmProjectOperator
description: Prometheus Federator
icon: https://raw.githubusercontent.com/rancher/prometheus-federator/main/assets/logos/prometheus-federator.svg
name: prometheus-federator
version: 0.1.0

View File

@ -0,0 +1,119 @@
# Prometheus Federator
This chart deploys a Helm Project Operator (based on [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts, each containing a Project Monitoring Stack, where each stack contains:
- [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator))
- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator))
- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) (deployed via an embedded Helm chart)
- Default PrometheusRules and Grafana dashboards based on the collection of community-curated resources from [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/)
- Default ServiceMonitors that watch the deployed resources
> **Important Note: Prometheus Federator is designed to be deployed alongside an existing Prometheus Operator deployment in a cluster that has already installed the Prometheus Operator CRDs.**
By default, the chart is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus from which each Project Monitoring Stack is configured to federate namespace-scoped metrics by default.
## Pre-Installation: Using Prometheus Federator with Rancher and rancher-monitoring
If you are running your cluster on [Rancher](https://rancher.com/) and already have [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/) deployed onto your cluster, Prometheus Federator's default configuration should already work with your existing Cluster Monitoring Stack; however, here are some notes on how we recommend you configure rancher-monitoring to optimize the security and usability of Prometheus Federator in your cluster:
### Ensure the cattle-monitoring-system namespace is placed into the System Project (or a similarly locked down Project that has access to other Projects in the cluster)
Prometheus Operator's security model expects that the namespace it is deployed into (`cattle-monitoring-system`) has limited access for anyone except Cluster Admins, to avoid privilege escalation via execing into Pods (such as the Jobs executing Helm operations). In addition, deploying Prometheus Federator and all Project Prometheus stacks into the System Project ensures that each Project Prometheus is able to reach out to scrape workloads across all Projects (even if Network Policies are defined via Project Network Isolation), while limiting the ability of Project Owners, Project Members, and other users to access data they shouldn't have access to (e.g. being allowed to exec into pods, set up the ability to scrape namespaces outside of a given Project, etc.).
### Configure rancher-monitoring to only watch for resources created by the Helm chart itself
Since each Project Monitoring Stack will watch the other namespaces and collect additional custom workload metrics or dashboards already, it's recommended to configure the following settings on all selectors to ensure that the Cluster Prometheus Stack only monitors resources created by the Helm Chart itself:
```
matchLabels:
release: "rancher-monitoring"
```
The following selector fields are recommended to have this value:
- `.Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector`
- `.Values.prometheus.prometheusSpec.serviceMonitorSelector`
- `.Values.prometheus.prometheusSpec.podMonitorSelector`
- `.Values.prometheus.prometheusSpec.ruleSelector`
- `.Values.prometheus.prometheusSpec.probeSelector`
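For reference, this is a minimal sketch of what those overrides could look like in a rancher-monitoring values file, assuming the standard kube-prometheus-stack layout that rancher-monitoring is based on (confirm the exact paths against your installed chart version):
```
alertmanager:
  alertmanagerSpec:
    alertmanagerConfigSelector:
      matchLabels:
        release: "rancher-monitoring"
prometheus:
  prometheusSpec:
    serviceMonitorSelector:
      matchLabels:
        release: "rancher-monitoring"
    podMonitorSelector:
      matchLabels:
        release: "rancher-monitoring"
    ruleSelector:
      matchLabels:
        release: "rancher-monitoring"
    probeSelector:
      matchLabels:
        release: "rancher-monitoring"
```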
Once this setting is turned on, you can always create ServiceMonitors or PodMonitors that are picked up by the Cluster Prometheus by adding the label `release: "rancher-monitoring"` to them (in which case they will automatically be ignored by Project Monitoring Stacks by default, even if the namespaces in which those ServiceMonitors or PodMonitors reside are not system namespaces).
> Note: If you don't want to allow users to be able to create ServiceMonitors and PodMonitors that aggregate into the Cluster Prometheus in Project namespaces, you can additionally set the namespaceSelectors on the chart to only target system namespaces (which must contain `cattle-monitoring-system` and `cattle-dashboards`, where rancher-monitoring deploys resources by default; you will also need to monitor the `default` namespace to get apiserver metrics, or create a custom ServiceMonitor to scrape apiserver metrics from the Service residing in the default namespace) to limit your Cluster Prometheus from picking up other Prometheus Operator CRs; in that case, it is recommended to set `.Values.prometheus.prometheusSpec.ignoreNamespaceSelectors` to `true` to allow you to define ServiceMonitors that can monitor non-system namespaces from within a system namespace.
In addition, if you modified the default `.Values.grafana.sidecar.*.searchNamespace` values on the Grafana Helm subchart for Monitoring V2, it is also recommended to remove the overrides or ensure that your defaults are scoped to only system namespaces for the following values:
- `.Values.grafana.sidecar.dashboards.searchNamespace` (default `cattle-dashboards`)
- `.Values.grafana.sidecar.datasources.searchNamespace` (default `null`, which means it uses the release namespace `cattle-monitoring-system`)
- `.Values.grafana.sidecar.notifiers.searchNamespace` (default `null`, which means it uses the release namespace `cattle-monitoring-system`)
### Increase the CPU / memory limits of the Cluster Prometheus
Depending on a cluster's setup, it's generally recommended to give a large amount of dedicated memory to the Cluster Prometheus to avoid restarts due to out-of-memory errors (OOMKilled), usually caused by churn created in the cluster that causes a large number of high cardinality metrics to be generated and ingested by Prometheus within one block of time; this is one of the reasons why the default Rancher Monitoring stack expects around 4GB of RAM to be able to operate in a normal-sized cluster. However, when introducing Project Monitoring Stacks that are all sending `/federate` requests to the same Cluster Prometheus and are reliant on the Cluster Prometheus being "up" to federate that system data on their namespaces, it's even more important that the Cluster Prometheus has an ample amount of CPU / memory assigned to it to prevent an outage that can cause data gaps across all Project Prometheis in the cluster.
> Note: There are no specific recommendations on how much memory the Cluster Prometheus should be configured with since it depends entirely on the user's setup (namely the likelihood of encountering a high churn rate and the scale of metrics that could be generated at that time); it generally varies per setup.
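Purely as an illustration of where such resources would be configured in rancher-monitoring (the numbers below are placeholders rather than recommendations, and the field path assumes the kube-prometheus-stack layout that rancher-monitoring is based on):
```
prometheus:
  prometheusSpec:
    resources:
      requests:
        cpu: 750m
        memory: 4Gi
      limits:
        memory: 4Gi
```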
## How does the operator work?
1. On deploying this chart, users can create ProjectHelmCharts CRs with `spec.helmApiVersion` set to `monitoring.cattle.io/v1alpha1` (also known as "Project Monitors" in the Rancher UI) in a **Project Registration Namespace (`cattle-project-<id>`)**.
2. On seeing each ProjectHelmChart CR, the operator will automatically deploy a Project Prometheus stack on the Project Owner's behalf in the **Project Release Namespace (`cattle-project-<id>-monitoring`)**, based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**.
3. RBAC will automatically be assigned in the Project Release Namespace to allow users to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack deployed; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) (see below for more information about configuring RBAC).
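For illustration, here is a minimal sketch of a ProjectHelmChart ("Project Monitor") as described in step 1; the group/version follows the `helm.cattle.io.projecthelmchart/v1alpha1` GVR advertised by this chart, while the object name and the project ID in the namespace are hypothetical placeholders:
```
apiVersion: helm.cattle.io/v1alpha1
kind: ProjectHelmChart
metadata:
  name: project-monitoring              # hypothetical name
  namespace: cattle-project-p-example   # hypothetical Project Registration Namespace (cattle-project-<id>)
spec:
  helmApiVersion: monitoring.cattle.io/v1alpha1
  values: {}                            # values.yaml override passed to the underlying Project Monitoring Stack chart
```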
### What is a Project?
In Prometheus Federator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`; by default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given [Rancher](https://rancher.com/) Project.
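For example, with the default project label, a namespace is treated as part of a Project simply by carrying that label; a minimal sketch (the namespace name and project ID `p-example` are hypothetical):
```
apiVersion: v1
kind: Namespace
metadata:
  name: my-app
  labels:
    field.cattle.io/projectId: p-example   # default label used to group namespaces into a Project
```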
### Configuring the Helm release created by a ProjectHelmChart
The `spec.values` of the ProjectHelmChart resource corresponds to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either:
- View the chart's definition located at [`rancher/prometheus-federator` under `charts/rancher-project-monitoring`](https://github.com/rancher/prometheus-federator/blob/main/charts/rancher-project-monitoring) (where the chart version will be tied to the version of this operator)
- Look for the ConfigMap named `monitoring.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that were used to configure the chart (which was embedded directly into the `prometheus-federator` binary).
### Namespaces
As a Project Operator based on [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator), Prometheus Federator has three different classifications of namespaces that the operator looks out for:
1. **Operator / System Namespace**: this is the namespace that the operator is deployed into (e.g. `cattle-monitoring-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.**
2. **Project Registration Namespace (`cattle-project-<id>`)**: this is the set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace (see more details below). **Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace**.
> Note: Project Registration Namespaces will be auto-generated by the operator and imported into the Project it is tied to if `.Values.global.cattle.projectLabel` is provided (which is set to `field.cattle.io/projectId` by default); this indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g. this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted) or it is no longer watching that project (because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects). These namespaces will also never be auto-deleted to avoid destroying user data; it is recommended that users clean up these namespaces manually if desired on creating or deleting a project
> Note: if `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace
3. **Project Release Namespace (`cattle-project-<id>-monitoring`)**: this is the set of namespaces that the operator deploys Project Monitoring Stacks within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Project Monitoring Stack based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Prometheus Federator.**
> Note: Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue` (which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified) whenever a ProjectHelmChart is specified in a Project Registration Namespace
> Note: Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above)
> Note: if `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace
### Helm Resources (HelmChart, HelmRelease)
On deploying a ProjectHelmChart, the Prometheus Federator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn:
- A HelmChart CR (managed via an embedded [k3s-io/helm-controller](https://github.com/k3s-io/helm-controller) in the operator): this custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR; this CR is automatically updated on changes to the ProjectHelmChart (e.g. modifying the values.yaml) or changes to the underlying Project definition (e.g. adding or removing namespaces from a project).
> **Important Note: If a ProjectHelmChart is not deploying or updating the underlying Project Monitoring Stack for some reason, the Job created by this resource in the Operator / System namespace should be the first place you check to see if there's something wrong with the Helm operation; however, this is generally only accessible by a Cluster Admin.**
- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): this custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR).
> Note: HelmRelease CRs emit Kubernetes Events when the operator detects that an underlying Helm release is being modified and locks it back into place; to view these events, you can use `kubectl describe helmrelease <helm-release-name> -n <operator/system-namespace>`; you can also view the logs of this operator to see when changes are detected and which resources were attempted to be modified
Both of these resources are created for all Helm charts in the Operator / System Namespace to avoid escalation of privileges for underprivileged users.
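For orientation, a HelmChart CR from [k3s-io/helm-controller](https://github.com/k3s-io/helm-controller) generally looks like the sketch below; the exact fields that Prometheus Federator populates are not documented here, so treat every name and value as illustrative rather than as the operator's actual output.

```yaml
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  # hypothetical name; the operator creates these in its own Operator / System Namespace
  name: cattle-project-p-example-monitoring
  namespace: cattle-monitoring-system
spec:
  targetNamespace: cattle-project-p-example-monitoring  # the Project Release Namespace
  valuesContent: |-
    # rendered from the ProjectHelmChart's spec.values plus operator-level overrides
    federate:
      targets:
      - rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090
```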
### RBAC
As described in the section on namespaces above, Prometheus Federator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings:
- ClusterRoleBindings
- RoleBindings in the Project Release Namespace
On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`, `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`, or `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`; by default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles).
> Note: for Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates.
If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels:
- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}`
- `helm.cattle.io/project-helm-chart-role-aggregate-from: <admin|edit|view>`
By default, `rancher-project-monitoring` (the underlying chart deployed by Prometheus Federator) creates three default Roles per Project Release Namespace that grant `admin`, `edit`, and `view` users permissions to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack, in keeping with least privilege; however, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration Namespaces.
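As a sketch of that second option, a Cluster Admin could create a Role like the one below in a Project Release Namespace; the Role name, namespace, release name, and rules are all hypothetical, but the two labels are the ones described above, so subjects bound to the `admin` ClusterRole for the project would automatically receive a RoleBinding to it.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: extra-monitoring-access                       # hypothetical Role name
  namespace: cattle-project-p-example-monitoring      # hypothetical Project Release Namespace
  labels:
    # hypothetical Helm release name for the ProjectHelmChart
    helm.cattle.io/project-helm-chart-role: cattle-project-p-example-monitoring
    # propagate this Role to subjects discovered from admin-level bindings
    helm.cattle.io/project-helm-chart-role-aggregate-from: admin
rules:
- apiGroups: [""]
  resources: ["pods", "services"]                     # hypothetical additional permissions
  verbs: ["get", "list", "watch"]
```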
### Advanced Helm Project Operator Configuration
|Value|Configuration|
|---|---------------------------|
|`helmProjectOperator.valuesOverride`| Allows an Operator to override values that are set on each ProjectHelmChart deployment on an operator-level; user-provided options (specified on the `spec.values` of the ProjectHelmChart) are automatically overridden if operator-level values are provided. For an example, see how the default value overrides `federate.targets` (note: when overriding list values like `federate.targets`, user-provided list values will **not** be concatenated). A sketch of setting this value is shown after this table. |
|`helmProjectOperator.projectReleaseNamespaces.labelValue`| The ID of the Project that all Project Release Namespaces should be auto-imported into (via label and annotation). Not recommended to be overridden on a Rancher setup. |
|`helmProjectOperator.otherSystemProjectLabelValues`| Additional Project label values whose namespaces the operator should treat as system namespaces that should not be monitored. By default, all namespaces that match `global.cattle.systemProjectId` will not be matched. `cattle-monitoring-system`, `cattle-dashboards`, and `kube-system` are explicitly marked as system namespaces as well, regardless of label or annotation. |
|`helmProjectOperator.releaseRoleBindings.aggregate`| Whether to automatically create RBAC resources in Project Release Namespaces |
|`helmProjectOperator.releaseRoleBindings.clusterRoleRefs.<admin\|edit\|view>`| ClusterRoles to reference to discover subjects to create RoleBindings for in the Project Release Namespace for all corresponding Project Release Roles. See RBAC above for more information |
|`helmProjectOperator.hardenedNamespaces.enabled`| Whether to automatically patch the default ServiceAccount with `automountServiceAccountToken: false` and create a default NetworkPolicy in all managed namespaces in the cluster; the default values ensure that the creation of the namespace does not break a CIS 1.16 hardened scan |
|`helmProjectOperator.hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace |
|`helmProjectOperator.helmController.enabled`| Whether to enable an embedded k3s-io/helm-controller instance within the Helm Project Operator. Should be disabled for RKE2 clusters since RKE2 clusters already run Helm Controller to manage internal Kubernetes components |
|`helmProjectOperator.helmLocker.enabled`| Whether to enable an embedded rancher/helm-locker instance within the Helm Project Operator. |
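As a sketch, the options above could be set in a values file supplied to `helm install` or `helm upgrade` for this chart; the federation target and project ID below are assumptions about a particular environment, not defaults.

```yaml
helmProjectOperator:
  valuesOverride:
    federate:
      targets:
      # hypothetical Prometheus endpoint to federate from instead of the default
      - my-prometheus.my-monitoring-namespace.svc:9090
  projectReleaseNamespaces:
    labelValue: p-example   # hypothetical Project ID to import Project Release Namespaces into
  releaseRoleBindings:
    aggregate: true
```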

View File

@ -0,0 +1,10 @@
# Prometheus Federator
This chart deploys an operator that manages Project Monitoring Stacks composed of the following set of resources that are scoped to project namespaces:
- [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator))
- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator))
- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) (deployed via an embedded Helm chart)
- Default PrometheusRules and Grafana dashboards based on the collection of community-curated resources from [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/)
- Default ServiceMonitors that watch the deployed Prometheus, Grafana, and Alertmanager
Since this Project Monitoring Stack deploys Prometheus Operator CRs, an existing Prometheus Operator instance must already be deployed in the cluster for Prometheus Federator to successfully be able to deploy Project Monitoring Stacks. It is recommended to use [`rancher-monitoring`](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/) for this. For more information on how the chart works or advanced configurations, please read the `README.md`.

View File

@ -0,0 +1,15 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Helm Project Operator
catalog.cattle.io/kube-version: '>=1.16.0-0'
catalog.cattle.io/namespace: cattle-helm-system
catalog.cattle.io/os: linux,windows
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1
catalog.cattle.io/rancher-version: '>= 2.6.0-0 <=2.6.99-0'
catalog.cattle.io/release-name: helm-project-operator
apiVersion: v2
appVersion: 0.0.1
description: Helm Project Operator
name: helmProjectOperator
version: 0.0.1

View File

@ -0,0 +1,77 @@
# Helm Project Operator
## How does the operator work?
1. On deploying a Helm Project Operator, users can create ProjectHelmChart CRs with `spec.helmApiVersion` set to `dummy.cattle.io/v1alpha1` in a **Project Registration Namespace (`cattle-project-<id>`)**.
2. On seeing each ProjectHelmChart CR, the operator will automatically deploy the embedded Helm chart on the Project Owner's behalf in the **Project Release Namespace (`cattle-project-<id>-dummy`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**.
3. RBAC will automatically be assigned in the Project Release Namespace to allow users to interact with the deployed Helm chart, based on Roles created in the Project Release Namespace with a given set of labels; this will be based on the RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) (see below for more information about configuring RBAC).
### What is a Project?
In Helm Project Operator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`; by default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given [Rancher](https://rancher.com/) Project.
### What is a ProjectHelmChart?
A ProjectHelmChart is an instance of a (project-scoped) Helm chart deployed on behalf of a user who has permissions to create ProjectHelmChart resources in a Project Registration namespace.
Generally, the best way to think about the ProjectHelmChart model is by comparing it to two other models:
1. Managed Kubernetes providers (EKS, GKE, AKS, etc.): in this model, a user has the ability to say "I want a Kubernetes cluster" but the underlying cloud provider is responsible for provisioning the infrastructure and offering **limited view and access** of the underlying resources created on their behalf; similarly, Helm Project Operator allows a Project Owner to say "I want this Helm chart deployed", but the underlying Operator is responsible for "provisioning" (deploying) the Helm chart and offering **limited view and access** of the underlying Kubernetes resources created on their behalf (based on configuring "least-privilege" Kubernetes RBAC for the Project Owners / Members in the newly created Project Release Namespace).
2. Dynamically-provisioned Persistent Volumes: in this model, a single resource (PersistentVolume) exists that allows you to specify a Storage Class that actually implements provisioning the underlying storage via a Storage Class Provisioner (e.g. Longhorn). Similarly, a single ProjectHelmChart resource exists that allows you to specify a `spec.helmApiVersion` ("storage class") that actually implements deploying the underlying Helm chart via a Helm Project Operator (e.g. [`rancher/prometheus-federator`](https://github.com/rancher/prometheus-federator)).
### Configuring the Helm release created by a ProjectHelmChart
The `spec.values` of this ProjectHelmChart resource corresponds to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf (a minimal example is sketched after the list below); to see the underlying chart's `values.yaml` spec, either:
- View the chart's definition located at [`rancher/helm-project-operator` under `charts/example-chart`](https://github.com/rancher/helm-project-operator/blob/main/charts/example-chart) (where the chart version will be tied to the version of this operator)
- Look for the ConfigMap named `dummy.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that were used to configure the chart (which is embedded directly into the `helm-project-operator` binary).
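For example, a minimal ProjectHelmChart that would have this operator deploy the embedded `example-chart` might look like the sketch below; the resource name, namespace, and values are hypothetical, and the real keys should be taken from the ConfigMap or chart definition mentioned above.

```yaml
apiVersion: helm.cattle.io/v1alpha1
kind: ProjectHelmChart
metadata:
  name: project-example-chart                # hypothetical name
  namespace: cattle-project-p-example        # hypothetical Project Registration Namespace
spec:
  helmApiVersion: dummy.cattle.io/v1alpha1
  values:
    # illustrative override; consult the example-chart values.yaml for real options
    replicas: 1
```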
### Namespaces
All Helm Project Operators have three different classifications of namespaces that the operator looks out for:
1. **Operator / System Namespace**: this is the namespace that the operator is deployed into (e.g. `cattle-helm-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.**
2. **Project Registration Namespace (`cattle-project-<id>`)**: this is the set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace (see more details below). **Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace**.
> Note: Project Registration Namespaces will be auto-generated by the operator and imported into the Project they are tied to if `.Values.global.cattle.projectLabel` is provided (which is set to `field.cattle.io/projectId` by default); this indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g. this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted) or it is no longer watching that project (because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects). These namespaces will also never be auto-deleted, to avoid destroying user data; it is recommended that users clean up these namespaces manually, if desired, when creating or deleting a project.
> Note: if `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace
3. **Project Release Namespace (`cattle-project-<id>-dummy`)**: this is the set of namespaces that the operator deploys Helm charts within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Helm charts based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Helm Project Operator.**
> Note: Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue` (which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified) whenever a ProjectHelmChart is specified in a Project Registration Namespace
> Note: Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above)
> Note: if `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace
### Helm Resources (HelmChart, HelmRelease)
On deploying a ProjectHelmChart, the Helm Project Operator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn:
- A HelmChart CR (managed via an embedded [k3s-io/helm-controller](https://github.com/k3s-io/helm-controller) in the operator): this custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR; this CR is automatically updated on changes to the ProjectHelmChart (e.g. modifying the values.yaml) or changes to the underlying Project definition (e.g. adding or removing namespaces from a project).
> **Important Note: If a ProjectHelmChart is not deploying or updating the underlying Helm chart for some reason, the Job created by this resource in the Operator / System Namespace should be the first place you check to see if there's something wrong with the Helm operation; however, this is generally only accessible by a Cluster Admin.**
- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): this custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR).
> Note: HelmRelease CRs emit Kubernetes Events when the operator detects that an underlying Helm release is being modified and locks it back into place; to view these events, you can use `kubectl describe helmrelease <helm-release-name> -n <operator/system-namespace>`; you can also view the logs of this operator to see when changes are detected and which resources were attempted to be modified
Both of these resources are created for all Helm charts in the Operator / System Namespace to avoid escalation of privileges for underprivileged users.
### RBAC
As described in the section on namespaces above, the Helm Project Operator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings:
- ClusterRoleBindings
- RoleBindings in the Project Release Namespace
On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`, `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`, or `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`; by default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles).
> Note: for Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates.
If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels:
- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}`
- `helm.cattle.io/project-helm-chart-role-aggregate-from: <admin|edit|view>`
By default, the `example-chart` (the underlying chart deployed by Helm Project Operator) does not create any default Roles; however, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration Namespaces.
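As a sketch of what the operator reacts to, a ClusterRoleBinding like the one below (its name and user are hypothetical) would cause the operator to add `jane` as a subject of the auto-generated RoleBindings for any Roles in the Project Release Namespace labeled with `helm.cattle.io/project-helm-chart-role-aggregate-from: edit`.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: project-member-jane          # hypothetical binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: edit                         # matches releaseRoleBindings.clusterRoleRefs.edit by default
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: jane                         # hypothetical user discovered by the operator
```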
### Advanced Helm Project Operator Configuration
|Value|Configuration|
|---|---------------------------|
|`valuesOverride`| Allows an Operator to override values that are set on each ProjectHelmChart deployment on an operator-level; user-provided options (specified on the `spec.values` of the ProjectHelmChart) are automatically overridden if operator-level values are provided (note: when overriding list values, user-provided list values will **not** be concatenated) |
|`projectReleaseNamespaces.labelValue`| The ID of the Project that all Project Release Namespaces should be auto-imported into (via label and annotation). Not recommended to be overridden on a Rancher setup. |
|`otherSystemProjectLabelValues`| Additional Project label values whose namespaces the operator should treat as system namespaces that should not be monitored. By default, all namespaces that match `global.cattle.systemProjectId` will not be matched. `kube-system` is explicitly marked as a system namespace as well, regardless of label or annotation. |
|`releaseRoleBindings.aggregate`| Whether to automatically create RBAC resources in Project Release Namespaces |
|`releaseRoleBindings.clusterRoleRefs.<admin\|edit\|view>`| ClusterRoles to reference to discover subjects to create RoleBindings for in the Project Release Namespace for all corresponding Project Release Roles. See RBAC above for more information |
|`hardenedNamespaces.enabled`| Whether to automatically patch the default ServiceAccount with `automountServiceAccountToken: false` and create a default NetworkPolicy in all managed namespaces in the cluster; the default values ensure that the creation of the namespace does not break a CIS 1.16 hardened scan |
|`hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace |
|`helmController.enabled`| Whether to enable an embedded k3s-io/helm-controller instance within the Helm Project Operator. Should be disabled for RKE2 clusters since RKE2 clusters already run Helm Controller to manage internal Kubernetes components |
|`helmLocker.enabled`| Whether to enable an embedded rancher/helm-locker instance within the Helm Project Operator. |
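As a sketch, a values file for this chart adjusting a few of these options might look like the following; every value shown is illustrative and mirrors the keys documented in the chart's `values.yaml`.

```yaml
helmController:
  enabled: false          # e.g. on RKE2 clusters, which already run their own Helm Controller
helmLocker:
  enabled: true
hardenedNamespaces:
  enabled: true
  configuration:
    serviceAccountSpec:
      automountServiceAccountToken: false
    networkPolicySpec:
      podSelector: {}
      policyTypes: ["Ingress", "Egress"]
      ingress: []         # illustrative: deny all ingress in managed namespaces
      egress: []
```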

View File

@ -0,0 +1,37 @@
questions:
- variable: helmController.enabled
label: Enable Embedded Helm Controller
description: 'Note: If you are running Prometheus Federator in an RKE2 cluster, this should be disabled.'
type: boolean
group: Helm Controller
- variable: helmLocker.enabled
label: Enable Embedded Helm Locker
type: boolean
group: Helm Locker
- variable: projectReleaseNamespaces.labelValue
label: Project Release Namespace Project ID
description: By default, the System Project is selected. This can be overridden to a different Project (e.g. p-xxxxx)
type: string
required: false
group: Namespaces
- variable: releaseRoleBindings.clusterRoleRefs.admin
label: Admin ClusterRole
description: By default, admin selects Project Owners. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: admin
required: false
group: RBAC
- variable: releaseRoleBindings.clusterRoleRefs.edit
label: Edit ClusterRole
description: By default, edit selects Project Members. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: edit
required: false
group: RBAC
- variable: releaseRoleBindings.clusterRoleRefs.view
label: View ClusterRole
description: By default, view selects Read-Only users. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: view
required: false
group: RBAC

View File

@ -0,0 +1,3 @@
{{ $.Chart.Name }} has been installed. Check its status by running:
kubectl --namespace {{ template "helm-project-operator.namespace" . }} get pods -l "release={{ $.Release.Name }}"

View File

@ -0,0 +1,66 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters will add a default taint to linux nodes;
add the linux tolerations below so that workloads can be scheduled onto those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
# Helm Project Operator
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}}
{{- define "helm-project-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "helm-project-operator.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/* Create chart name and version as used by the chart label. */}}
{{- define "helm-project-operator.chartref" -}}
{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
{{- end }}
{{/* Generate basic labels */}}
{{- define "helm-project-operator.labels" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}"
app.kubernetes.io/part-of: {{ template "helm-project-operator.name" . }}
chart: {{ template "helm-project-operator.chartref" . }}
release: {{ $.Release.Name | quote }}
heritage: {{ $.Release.Service | quote }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,70 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "helm-project-operator.name" . }}-cleanup
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed
spec:
template:
metadata:
name: {{ template "helm-project-operator.name" . }}-cleanup
labels: {{ include "helm-project-operator.labels" . | nindent 8 }}
app: {{ template "helm-project-operator.name" . }}
spec:
serviceAccountName: {{ template "helm-project-operator.name" . }}
securityContext:
runAsNonRoot: false
runAsUser: 0
initContainers:
- name: add-cleanup-annotations
image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- >
echo "Labeling all ProjectHelmCharts with helm.cattle.io/helm-project-operator-cleanup=true";
EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }};
IFS=$'\n';
for namespace in $(kubectl get namespaces -l helm.cattle.io/helm-project-operated=true --no-headers -o=custom-columns=NAME:.metadata.name); do
for projectHelmChartAndHelmApiVersion in $(kubectl get projecthelmcharts -n ${namespace} --no-headers -o=custom-columns=NAME:.metadata.name,HELMAPIVERSION:.spec.helmApiVersion); do
projectHelmChartAndHelmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | xargs);
projectHelmChart=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f1);
helmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f2);
if [[ ${helmApiVersion} != ${EXPECTED_HELM_API_VERSION} ]]; then
echo "Skipping marking ${namespace}/${projectHelmChart} with cleanup annotation since spec.helmApiVersion: ${helmApiVersion} is not ${EXPECTED_HELM_API_VERSION}";
continue;
fi;
kubectl label projecthelmcharts -n ${namespace} ${projectHelmChart} helm.cattle.io/helm-project-operator-cleanup=true --overwrite;
done;
done;
containers:
- name: ensure-subresources-deleted
image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- >
SYSTEM_NAMESPACE={{ .Release.Namespace }}
EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }};
HELM_API_VERSION_TRUNCATED=$(echo ${EXPECTED_HELM_API_VERSION} | cut -d'/' -f1);
echo "Ensuring HelmCharts and HelmReleases are deleted from ${SYSTEM_NAMESPACE}...";
while [[ "$(kubectl get helmcharts,helmreleases -l helm.cattle.io/helm-api-version=${HELM_API_VERSION_TRUNCATED} -n ${SYSTEM_NAMESPACE} 2>&1)" != "No resources found in ${SYSTEM_NAMESPACE} namespace." ]]; do
echo "waiting for HelmCharts and HelmReleases to be deleted from ${SYSTEM_NAMESPACE}... sleeping 3 seconds";
sleep 3;
done;
echo "Successfully deleted all HelmCharts and HelmReleases in ${SYSTEM_NAMESPACE}!";
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.cleanup.nodeSelector }}
{{- toYaml .Values.cleanup.nodeSelector | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.cleanup.tolerations }}
{{- toYaml .Values.cleanup.tolerations | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,57 @@
{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "helm-project-operator.name" . }}-admin
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups:
- helm.cattle.io
resources:
- projecthelmcharts
- projecthelmcharts/finalizers
- projecthelmcharts/status
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "helm-project-operator.name" . }}-edit
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
{{- end }}
rules:
- apiGroups:
- helm.cattle.io
resources:
- projecthelmcharts
- projecthelmcharts/status
verbs:
- 'get'
- 'list'
- 'watch'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "helm-project-operator.name" . }}-view
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
{{- end }}
rules:
- apiGroups:
- helm.cattle.io
resources:
- projecthelmcharts
- projecthelmcharts/status
verbs:
- 'get'
- 'list'
- 'watch'
{{- end }}

View File

@ -0,0 +1,14 @@
## Note: If you add another entry to this ConfigMap, make sure a corresponding env var is set
## in the deployment of the operator to ensure that a Helm upgrade will force the operator
## to reload the values in the ConfigMap and redeploy
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "helm-project-operator.name" . }}-config
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
data:
hardened.yaml: |-
{{ .Values.hardenedNamespaces.configuration | toYaml | indent 4 }}
values.yaml: |-
{{ .Values.valuesOverride | toYaml | indent 4 }}

View File

@ -0,0 +1,121 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "helm-project-operator.name" . }}
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ template "helm-project-operator.name" . }}
release: {{ $.Release.Name | quote }}
template:
metadata:
labels: {{ include "helm-project-operator.labels" . | nindent 8 }}
app: {{ template "helm-project-operator.name" . }}
spec:
containers:
- name: {{ template "helm-project-operator.name" . }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
args:
- {{ template "helm-project-operator.name" . }}
- --namespace={{ template "helm-project-operator.namespace" . }}
- --controller-name={{ template "helm-project-operator.name" . }}
- --values-override-file=/etc/helmprojectoperator/config/values.yaml
{{- if .Values.global.cattle.systemDefaultRegistry }}
- --system-default-registry={{ .Values.global.cattle.systemDefaultRegistry }}
{{- end }}
{{- if .Values.global.cattle.url }}
- --cattle-url={{ .Values.global.cattle.url }}
{{- end }}
{{- if .Values.global.cattle.projectLabel }}
- --project-label={{ .Values.global.cattle.projectLabel }}
{{- end }}
{{- if not .Values.projectReleaseNamespaces.enabled }}
- --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }}
{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }}
- --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }}
{{- else if len .Values.otherSystemProjectLabelValues }}
- --system-project-label-values={{ join "," .Values.otherSystemProjectLabelValues }}
{{- end }}
{{- if .Values.projectReleaseNamespaces.enabled }}
{{- if .Values.projectReleaseNamespaces.labelValue }}
- --project-release-label-value={{ .Values.projectReleaseNamespaces.labelValue }}
{{- else if .Values.global.cattle.systemProjectId }}
- --project-release-label-value={{ .Values.global.cattle.systemProjectId }}
{{- end }}
{{- end }}
{{- if .Values.global.cattle.clusterId }}
- --cluster-id={{ .Values.global.cattle.clusterId }}
{{- end }}
{{- if .Values.releaseRoleBindings.aggregate }}
{{- if .Values.releaseRoleBindings.clusterRoleRefs }}
{{- if .Values.releaseRoleBindings.clusterRoleRefs.admin }}
- --admin-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.admin }}
{{- end }}
{{- if .Values.releaseRoleBindings.clusterRoleRefs.edit }}
- --edit-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.edit }}
{{- end }}
{{- if .Values.releaseRoleBindings.clusterRoleRefs.view }}
- --view-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.view }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.hardenedNamespaces.enabled }}
- --hardening-options-file=/etc/helmprojectoperator/config/hardening.yaml
{{- else }}
- --disable-hardening
{{- end }}
{{- if .Values.debug }}
- --debug
- --debug-level={{ .Values.debugLevel }}
{{- end }}
{{- if not .Values.helmController.enabled }}
- --disable-embedded-helm-controller
{{- else }}
- --helm-job-image={{ template "system_default_registry" . }}{{ .Values.helmController.job.image.repository }}:{{ .Values.helmController.job.image.tag }}
{{- end }}
{{- if not .Values.helmLocker.enabled }}
- --disable-embedded-helm-locker
{{- end }}
{{- if .Values.additionalArgs }}
{{- toYaml .Values.additionalArgs | nindent 10 }}
{{- end }}
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
## Note: The below two values only exist to force Helm to upgrade the deployment on
## a change to the contents of the ConfigMap during an upgrade. Neither serve
## any practical purpose and can be removed and replaced with a configmap reloader
## in a future change if dynamic updates are required.
- name: HARDENING_OPTIONS_SHA_256_HASH
value: {{ .Values.hardenedNamespaces.configuration | toYaml | sha256sum }}
- name: VALUES_OVERRIDE_SHA_256_HASH
value: {{ .Values.valuesOverride | toYaml | sha256sum }}
{{- if .Values.resources }}
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/helmprojectoperator/config"
serviceAccountName: {{ template "helm-project-operator.name" . }}
{{- if .Values.securityContext }}
securityContext: {{ toYaml .Values.securityContext | indent 8 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{- toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{- toYaml .Values.tolerations | nindent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "helm-project-operator.name" . }}-config

View File

@ -0,0 +1,68 @@
{{- if .Values.global.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "helm-project-operator.name" . }}-psp
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
{{- if .Values.global.rbac.pspAnnotations }}
annotations: {{ toYaml .Values.global.rbac.pspAnnotations | nindent 4 }}
{{- end }}
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
# Permits the container to run with root privileges as well.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 0
max: 65535
readOnlyRootFilesystem: false
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "helm-project-operator.name" . }}-psp
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
rules:
{{- if semverCompare "> 1.15.0-0" .Capabilities.KubeVersion.GitVersion }}
- apiGroups: ['policy']
{{- else }}
- apiGroups: ['extensions']
{{- end }}
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "helm-project-operator.name" . }}-psp
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "helm-project-operator.name" . }}-psp
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "helm-project-operator.name" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "helm-project-operator.name" . }}
namespace: {{ template "helm-project-operator.namespace" . }}
{{- end }}

View File

@ -0,0 +1,123 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "helm-project-operator.name" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "cluster-admin" # see note below
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: {{ template "helm-project-operator.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "helm-project-operator.name" . }}
namespace: {{ template "helm-project-operator.namespace" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "helm-project-operator.name" . }}
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
app: {{ template "helm-project-operator.name" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
{{- end }}
# ---
# NOTE:
# As of now, due to the fact that the k3s-io/helm-controller can only deploy jobs that are cluster-bound to the cluster-admin
# ClusterRole, the only way for this operator to be able to perform that binding is if it is also bound to the cluster-admin ClusterRole.
#
# As a result, this ClusterRole will be left as a work-in-progress until changes are made in k3s-io/helm-controller to allow us to grant
# only scoped down permissions to the Job that is deployed.
#
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRole
# metadata:
# name: {{ template "helm-project-operator.name" . }}
# labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
# app: {{ template "helm-project-operator.name" . }}
# rules:
# # Helm Project Operator
# - apiGroups:
# - helm.cattle.io
# resources:
# - projecthelmcharts
# - projecthelmcharts/status
# verbs:
# - "*"
# - apiGroups:
# - ""
# resources:
# - namespaces
# verbs:
# - create
# - get
# - list
# - watch
# - update
# - patch
# # Helm Controller
# - apiGroups:
# - helm.cattle.io
# resources:
# - helmcharts
# - helmcharts/status
# - helmchartconfigs
# - helmchartconfigs/status
# verbs:
# - "*"
# - apiGroups:
# - batch
# resources:
# - jobs
# verbs:
# - "*"
# - apiGroups:
# - ""
# resources:
# - serviceaccounts
# verbs:
# - "*"
# - apiGroups:
# - rbac.authorization.k8s.io
# resources:
# - clusterrolebindings
# verbs:
# - "*"
# # Helm Locker
# - apiGroups:
# - helm.cattle.io
# resources:
# - helmreleases
# - helmreleases/status
# verbs:
# - "*"
# - apiGroups:
# - ""
# resources:
# - secrets
# verbs:
# - create
# - get
# - list
# - watch
# - update
# - patch
# # Common
# - apiGroups:
# - ""
# resources:
# - configmaps
# verbs:
# - "*"
# - apiGroups:
# - ""
# - events.k8s.io
# resources:
# - events
# verbs:
# - "*"

View File

@ -0,0 +1,62 @@
{{- if .Values.systemNamespacesConfigMap.create }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "helm-project-operator.name" . }}-system-namespaces
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
data:
system-namespaces.json: |-
{
{{- if .Values.projectReleaseNamespaces.enabled }}
{{- if .Values.projectReleaseNamespaces.labelValue }}
"projectReleaseLabelValue": {{ .Values.projectReleaseNamespaces.labelValue | quote }},
{{- else if .Values.global.cattle.systemProjectId }}
"projectReleaseLabelValue": {{ .Values.global.cattle.systemProjectId | quote }},
{{- else }}
"projectReleaseLabelValue": "",
{{- end }}
{{- else }}
"projectReleaseLabelValue": "",
{{- end }}
{{- if not .Values.projectReleaseNamespaces.enabled }}
"systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }}
{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }}
"systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }}
{{- else if len .Values.otherSystemProjectLabelValues }}
"systemProjectLabelValues": {{ .Values.otherSystemProjectLabelValues | toJson }}
{{- else }}
"systemProjectLabelValues": []
{{- end }}
}
---
{{- if (and .Values.systemNamespacesConfigMap.rbac.enabled .Values.systemNamespacesConfigMap.rbac.subjects) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "helm-project-operator.name" . }}-system-namespaces
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- "{{ template "helm-project-operator.name" . }}-system-namespaces"
verbs:
- 'get'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "helm-project-operator.name" . }}-system-namespaces
namespace: {{ template "helm-project-operator.namespace" . }}
labels: {{ include "helm-project-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "helm-project-operator.name" . }}-system-namespaces
subjects: {{ .Values.systemNamespacesConfigMap.rbac.subjects | toYaml | nindent 2 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,184 @@
# Default values for helm-project-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Helm Project Operator Configuration
global:
cattle:
clusterId: ""
projectLabel: field.cattle.io/projectId
systemDefaultRegistry: ""
systemProjectId: ""
url: ""
rbac:
## Create RBAC resources for ServiceAccounts and users
##
create: true
userRoles:
## Create default user ClusterRoles to allow users to interact with ProjectHelmCharts
create: true
## Aggregate default user ClusterRoles into default k8s ClusterRoles
aggregateToDefaultRoles: true
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
helmApiVersion: dummy.cattle.io/v1alpha1
## valuesOverride overrides values that are set on each ProjectHelmChart deployment on an operator-level
## User-provided values will be overwritten based on the values provided here
valuesOverride: {}
## projectReleaseNamespaces are auto-generated namespaces that are created to host Helm Releases
## managed by this operator on behalf of a ProjectHelmChart
projectReleaseNamespaces:
## Enabled determines whether Project Release Namespaces should be created. If false, the underlying
## Helm release will be deployed in the Project Registration Namespace
enabled: true
## labelValue is the value of the Project that the projectReleaseNamespace should be created within
## If empty, this will be set to the value of global.cattle.systemProjectId
## If global.cattle.systemProjectId is also empty, project release namespaces will be disabled
labelValue: ""
## otherSystemProjectLabelValues are project labels that identify namespaces as those that should be treated as system projects
## i.e. they will be entirely ignored by the operator
## By default, the global.cattle.systemProjectId will be in this list
otherSystemProjectLabelValues: []
## releaseRoleBindings configures RoleBindings automatically created by the Helm Project Operator
## in Project Release Namespaces where underlying Helm charts are deployed
releaseRoleBindings:
## aggregate enables creating these RoleBindings off aggregating RoleBindings in the
## Project Registration Namespace or ClusterRoleBindings that bind users to the ClusterRoles
## specified under clusterRoleRefs
aggregate: true
## clusterRoleRefs are the ClusterRoles whose RoleBinding or ClusterRoleBindings should determine
## the RoleBindings created in the Project Release Namespace
##
## By default, these are set to create RoleBindings based on the RoleBindings / ClusterRoleBindings
## attached to the default K8s user-facing ClusterRoles of admin, edit, and view.
## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
##
clusterRoleRefs:
admin: admin
edit: edit
view: view
hardenedNamespaces:
# Whether to automatically manage the configuration of the default ServiceAccount and
# auto-create a NetworkPolicy for each namespace created by this operator
enabled: true
configuration:
# Values to be applied to each default ServiceAccount created in a managed namespace
serviceAccountSpec:
secrets: []
imagePullSecrets: []
automountServiceAccountToken: false
# Values to be applied to each default generated NetworkPolicy created in a managed namespace
networkPolicySpec:
podSelector: {}
egress: []
ingress: []
policyTypes: ["Ingress", "Egress"]
## systemNamespacesConfigMap is a ConfigMap created to allow users to see valid entries
## for registering a ProjectHelmChart for a given Project on the Rancher Dashboard UI.
## It does not need to be enabled for a non-Rancher use case.
systemNamespacesConfigMap:
## Create indicates whether the system namespaces configmap should be created
## This is a required value for integration with Rancher Dashboard
create: true
## RBAC provides options around the RBAC created to allow users to be able to view
## the systemNamespacesConfigMap; if not specified, only users with the ability to
## view ConfigMaps in the namespace where this chart is deployed will be able to
## properly view the system namespaces on the Rancher Dashboard UI
rbac:
## enabled indicates that we should deploy a RoleBinding and Role to view this ConfigMap
enabled: true
## subjects are the subjects that should be bound to this default RoleBinding
## By default, we allow anyone who is authenticated to the system to be able to view
## this ConfigMap in the deployment namespace
subjects:
- kind: Group
name: system:authenticated
nameOverride: ""
namespaceOverride: ""
image:
repository: rancher/helm-project-operator
tag: v0.0.1
pullPolicy: IfNotPresent
helmController:
# Note: should be disabled for RKE2 clusters since they already run Helm Controller to manage internal Kubernetes components
enabled: true
job:
image:
repository: rancher/klipper-helm
tag: v0.7.0-build20220315
helmLocker:
enabled: true
# Additional arguments to be passed into the Helm Project Operator image
additionalArgs: []
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
resources: {}
# limits:
# memory: 500Mi
# cpu: 1000m
# requests:
# memory: 100Mi
# cpu: 100m
securityContext: {}
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
debug: false
debugLevel: 0
cleanup:
image:
repository: rancher/shell
tag: v0.1.14
nodeSelector: {}
tolerations: []

View File

@ -0,0 +1,37 @@
questions:
- variable: helmProjectOperator.helmController.enabled
label: Enable Embedded Helm Controller
description: 'Note: If you are running Prometheus Federator in an RKE2 cluster, this should be disabled.'
type: boolean
group: Helm Controller
- variable: helmProjectOperator.helmLocker.enabled
label: Enable Embedded Helm Locker
type: boolean
group: Helm Locker
- variable: helmProjectOperator.projectReleaseNamespaces.labelValue
label: Project Release Namespace Project ID
description: By default, the System Project is selected. This can be overridden to a different Project (e.g. p-xxxxx)
type: string
required: false
group: Namespaces
- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin
label: Admin ClusterRole
description: By default, admin selects Project Owners. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: admin
required: false
group: RBAC
- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit
label: Edit ClusterRole
description: By default, edit selects Project Members. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: edit
required: false
group: RBAC
- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view
label: View ClusterRole
description: By default, view selects Read-Only users. This can be overridden to a different ClusterRole (e.g. rt-xxxxx)
type: string
default: view
required: false
group: RBAC

View File

@ -0,0 +1,3 @@
{{ $.Chart.Name }} has been installed. Check its status by running:
kubectl --namespace {{ template "prometheus-federator.namespace" . }} get pods -l "release={{ $.Release.Name }}"

View File

@ -0,0 +1,66 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows clusters will add a default taint to linux nodes;
add the linux tolerations below so that workloads can be scheduled onto those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
# Helm Project Operator
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}}
{{- define "prometheus-federator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prometheus-federator.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/* Create chart name and version as used by the chart label. */}}
{{- define "prometheus-federator.chartref" -}}
{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
{{- end }}
{{/* Generate basic labels */}}
{{- define "prometheus-federator.labels" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}"
app.kubernetes.io/part-of: {{ template "prometheus-federator.name" . }}
chart: {{ template "prometheus-federator.chartref" . }}
release: {{ $.Release.Name | quote }}
heritage: {{ $.Release.Service | quote }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,92 @@
# Default values for helm-project-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Prometheus Federator Configuration
global:
cattle:
systemDefaultRegistry: ""
projectLabel: field.cattle.io/projectId
clusterId: ""
systemProjectId: ""
url: ""
rbac:
pspEnabled: true
pspAnnotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
helmProjectOperator:
enabled: true
# ensures that all resources created by the subchart show up as prometheus-federator
helmApiVersion: monitoring.cattle.io/v1alpha1
nameOverride: prometheus-federator
helmController:
# Note: should be disabled for RKE2 clusters since they already run Helm Controller to manage internal Kubernetes components
enabled: true
helmLocker:
enabled: true
## valuesOverride overrides values that are set on each Project Prometheus Stack Helm Chart deployment on an operator level
## all values provided here will override any user-provided values automatically
valuesOverride:
federate:
# Change this to point at all Prometheuses you want all your Project Prometheus Stacks to federate from
# By default, this matches the default deployment of Rancher Monitoring
targets:
- rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090
image:
repository: rancher/prometheus-federator
tag: v0.1.0
pullPolicy: IfNotPresent
# Additional arguments to be passed into the Prometheus Federator image
additionalArgs: []
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
resources: {}
# limits:
# memory: 500Mi
# cpu: 1000m
# requests:
# memory: 100Mi
# cpu: 100m
securityContext: {}
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
debug: false
debugLevel: 0

View File

@ -0,0 +1,12 @@
annotations:
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: cattle-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux,windows
  catalog.cattle.io/release-name: rancher-aks-operator-crd
apiVersion: v2
appVersion: 1.0.5
description: AKS Operator CustomResourceDefinitions
name: rancher-aks-operator-crd
version: 100.0.4+up1.0.5

View File

@@ -0,0 +1,178 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    helm.sh/resource-policy: keep
  name: aksclusterconfigs.aks.cattle.io
spec:
  group: aks.cattle.io
  names:
    kind: AKSClusterConfig
    plural: aksclusterconfigs
    shortNames:
    - akscc
    singular: aksclusterconfig
  preserveUnknownFields: false
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          spec:
            properties:
              authBaseUrl:
                nullable: true
                type: string
              authorizedIpRanges:
                items:
                  nullable: true
                  type: string
                nullable: true
                type: array
              azureCredentialSecret:
                nullable: true
                type: string
              baseUrl:
                nullable: true
                type: string
              clusterName:
                nullable: true
                type: string
              dnsPrefix:
                nullable: true
                type: string
              dnsServiceIp:
                nullable: true
                type: string
              dockerBridgeCidr:
                nullable: true
                type: string
              httpApplicationRouting:
                nullable: true
                type: boolean
              imported:
                type: boolean
              kubernetesVersion:
                nullable: true
                type: string
              linuxAdminUsername:
                nullable: true
                type: string
              loadBalancerSku:
                nullable: true
                type: string
              logAnalyticsWorkspaceGroup:
                nullable: true
                type: string
              logAnalyticsWorkspaceName:
                nullable: true
                type: string
              monitoring:
                nullable: true
                type: boolean
              networkPlugin:
                nullable: true
                type: string
              networkPolicy:
                nullable: true
                type: string
              nodePools:
                items:
                  properties:
                    availabilityZones:
                      items:
                        nullable: true
                        type: string
                      nullable: true
                      type: array
                    count:
                      nullable: true
                      type: integer
                    enableAutoScaling:
                      nullable: true
                      type: boolean
                    maxCount:
                      nullable: true
                      type: integer
                    maxPods:
                      nullable: true
                      type: integer
                    minCount:
                      nullable: true
                      type: integer
                    mode:
                      nullable: true
                      type: string
                    name:
                      nullable: true
                      type: string
                    orchestratorVersion:
                      nullable: true
                      type: string
                    osDiskSizeGB:
                      nullable: true
                      type: integer
                    osDiskType:
                      nullable: true
                      type: string
                    osType:
                      nullable: true
                      type: string
                    vmSize:
                      nullable: true
                      type: string
                  type: object
                nullable: true
                type: array
              podCidr:
                nullable: true
                type: string
              privateCluster:
                nullable: true
                type: boolean
              resourceGroup:
                nullable: true
                type: string
              resourceLocation:
                nullable: true
                type: string
              serviceCidr:
                nullable: true
                type: string
              sshPublicKey:
                nullable: true
                type: string
              subnet:
                nullable: true
                type: string
              tags:
                additionalProperties:
                  nullable: true
                  type: string
                nullable: true
                type: object
              virtualNetwork:
                nullable: true
                type: string
              virtualNetworkResourceGroup:
                nullable: true
                type: string
            type: object
          status:
            properties:
              failureMessage:
                nullable: true
                type: string
              phase:
                nullable: true
                type: string
              rbacEnabled:
                nullable: true
                type: boolean
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
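
A minimal AKSClusterConfig object conforming to the schema above; every name, location, and size is a made-up placeholder, and the namespace is an assumption:

apiVersion: aks.cattle.io/v1
kind: AKSClusterConfig
metadata:
  name: example-aks-cluster       # placeholder name
  namespace: cattle-global-data   # assumed namespace; the CRD is namespace-scoped
spec:
  clusterName: example-aks-cluster
  resourceGroup: example-rg
  resourceLocation: eastus
  azureCredentialSecret: cattle-global-data:example-azure-credential
  kubernetesVersion: "1.23.5"
  imported: false
  nodePools:
  - name: agentpool
    mode: System
    count: 1
    vmSize: Standard_DS2_v2
    osDiskSizeGB: 128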

View File

@@ -0,0 +1,20 @@
annotations:
  catalog.cattle.io/auto-install: rancher-aks-operator-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/kube-version: '>= 1.18.0-0'
  catalog.cattle.io/namespace: cattle-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permits-os: linux,windows
  catalog.cattle.io/provides-gvr: aksclusterconfigs.aks.cattle.io/v1
  catalog.cattle.io/rancher-version: '>= 2.6.0-alpha'
  catalog.cattle.io/release-name: rancher-aks-operator
  catalog.cattle.io/scope: management
apiVersion: v2
appVersion: 1.0.5
description: A Helm chart for provisioning AKS clusters
home: https://github.com/rancher/aks-operator
name: rancher-aks-operator
sources:
- https://github.com/rancher/aks-operator
version: 100.0.4+up1.0.5

View File

@@ -0,0 +1,4 @@
You have deployed the Rancher AKS operator
Version: {{ .Chart.AppVersion }}
Description: This operator provisions AKS clusters
from AKSClusterConfig CRs.

View File

@@ -0,0 +1,25 @@
{{/* vim: set filetype=mustache: */}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
  value: "linux"
  effect: "NoSchedule"
  operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
kubernetes.io/os: linux
{{- end -}}
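
A sketch of how these helpers are typically wired into a pod template (the Deployment context and the image reference are illustrative assumptions, not taken from the chart):

# Illustrative pod spec fragment; surrounding Deployment fields omitted.
    spec:
      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
      containers:
      - name: rancher-aks-operator
        image: {{ template "system_default_registry" . }}rancher/aks-operator:v1.0.5  # placeholder image reference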

View File

@@ -0,0 +1,15 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aks-operator
  namespace: cattle-system
rules:
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['get', 'list', 'create', 'watch', 'update']
- apiGroups: ['aks.cattle.io']
  resources: ['aksclusterconfigs']
  verbs: ['get', 'list', 'update', 'watch']
- apiGroups: ['aks.cattle.io']
  resources: ['aksclusterconfigs/status']
  verbs: ['update']
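
A hedged sketch of the ClusterRoleBinding such a role is normally paired with; the binding and ServiceAccount names below are assumptions, not taken from the chart:

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aks-operator   # placeholder binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: aks-operator
subjects:
- kind: ServiceAccount
  name: aks-operator        # assumed ServiceAccount name
  namespace: cattle-system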

Some files were not shown because too many files have changed in this diff